import logging
import xmpp
import re
import time
from cylon.conf import Settings
from cylon.command import Loader
from cylon.plugin import Plugin
from cylon.hook import Hook
from optparse import OptionParser
class Cylon:
MSG_OK = 'Accept my fraking request. Now.'
MSG_KO = 'DENIED.'
def __init__(self):
self._conn = None
self._parser = OptionParser()
self._parser.add_option("-c", "--conf_file", dest="conf_file",
default="/etc/cylon.yml",
help="use configuration file", )
self._parser.add_option("-l", "--log_file", dest="log_file",
default="/var/log/cylon.log",
help="use specified file to write logs")
self._parser.add_option("-D", "--debug", dest="debug",
action="store_false", default=True,
help="Debug mode into console")
(self._options, self._args) = self._parser.parse_args()
logging.basicConfig(filename=self._options.log_file,
level=logging.INFO)
logging.info("Starting Cylon !")
self._settings = Settings(self._options.conf_file)
# hooks
if not self._settings.loaded_hooks_at_start:
self._hooks = None
else:
hooks = Loader.get_hooks(self._settings.plugin_dir,
self._settings.loaded_hooks_at_start)
self._hooks = hooks
Hook.hooks = self._hooks
Hook.settings = self._settings
# plugins
if not hasattr(self._settings, 'plugin_aliases'):
self._settings.plugin_aliases = {}
if not self._settings.loaded_plugins_at_start:
self._modules = self._aliases = {'publics' : {}, 'privates' : {}}
else:
modules = Loader.get_modules(self._settings.plugin_dir,
self._settings.loaded_plugins_at_start,
self._settings.plugin_aliases)
self._modules = modules[0]
self._aliases = modules[1]
built = Loader.get_builtins()
self._modules['publics'].update(built['publics'])
self._modules['privates'].update(built['privates'])
Plugin.modules = self._modules
Plugin.settings = self._settings
# status
self.__status = None
if hasattr(self._settings, 'default_status'):
self.__status = self._settings.default_status
# connection
self.__connect()
Plugin.connection = self._conn
if self._hooks != None:
Hook.connection = self._conn
self.__run()
def message_handler(self, conn, mess):
if xmpp.NS_DELAY in mess.getProperties(): return
sender = mess.getFrom().getResource()
if sender == self._settings.chat_name: return
body = mess.getBody()
if not body: return
prefixed = mess.getBody().startswith("%s "
% self._settings.command_prefix)
# hooks
if (prefixed == False) and (self._hooks != None):
for hook in self._hooks:
for regex, func in self._hooks[hook].regex:
res = regex.search(mess.getBody())
if res != None:
try:
msg = getattr(self._hooks[hook], func)(mess.getBody(), mess.getFrom(), res)
                        except Exception:
                            msg = "%s hook execution error" % hook
if msg:
logging.debug(msg)
if mess.getType() == "groupchat":
self.send(str(mess.getFrom()).split('/')[0], msg, "groupchat")
else:
self.send(mess.getFrom(), msg, "chat")
# plugins
modules = {}
aliases = {}
modules.update(self._modules['publics'])
aliases.update(self._aliases['publics'])
if mess.getType() == "groupchat":
Plugin.request_is_private = False
muc_from = str(mess.getFrom())
reply_to = muc_from.split('/')[0]
msg_type = "groupchat"
else:
Plugin.request_is_private = True
reply_to = mess.getFrom()
msg_type = "chat"
if str(mess.getFrom()).split('/')[0] in self._settings.master_names:
modules.update(self._modules['privates'])
aliases.update(self._aliases['privates'])
if prefixed or (mess.getType() == "chat"):
cmd = mess.getBody()
if prefixed:
                length = len(self._settings.command_prefix)
cmd = cmd[length + 1:]
logging.info(cmd)
cmd_parameters = cmd.split()
plugin_name = cmd_parameters[0]
if modules.has_key(plugin_name) or aliases.has_key(plugin_name):
if aliases.has_key(plugin_name):
func = aliases[plugin_name].keys()[0]
inst = aliases[plugin_name][func]
cmd_parameters.pop(0)
try:
msg = self.__call_plugin(mess, inst, func, cmd_parameters)
                except AttributeError, e:
                    msg = "Function %s not implemented." % func
                    logging.error("%s plugin exec: %s" % (plugin_name, str(e)))
else:
try:
class_ = cmd_parameters.pop(0)
inst = modules[class_]
if not cmd_parameters:
func = "default"
else:
func = cmd_parameters.pop(0)
                        # Check that the method exists; an AttributeError produces an error msg.
method = getattr(inst, func)
msg = self.__call_plugin(mess, inst, func, cmd_parameters)
except AttributeError, e:
msg = "Function %s not implemented." % func
logging.error("%s plugin exec: %s" % (class_, str(e)))
else:
msg = "Command '%s': not found." % plugin_name
if not msg:
# When a module doesn't return str.
msg = "Hmm. Problem(s) during command execution. (Null return)."
self.send(reply_to, msg, msg_type)
def __call_plugin(self, xmpp_mess, class_, func, param):
try:
msg = class_.wrapper(func, xmpp_mess.getBody(),
xmpp_mess.getFrom(), xmpp_mess.getType(),
param)
except Exception, e:
msg = "Error during %s function execution." % func
logging.error("%s plugin exec: %s" % (class_, str(e)))
return msg
def send(self, user, text, mess_type='chat'):
mess = self.build_message(text)
mess.setTo(user)
mess.setType(mess_type)
self._conn.send(mess)
def build_message(self, text):
text_plain = re.sub(r'<[^>]+>', '', text)
message = xmpp.protocol.Message(body=text_plain)
if text_plain != text:
html = xmpp.Node('html', {'xmlns': 'http://jabber.org/protocol/xhtml-im'})
try:
html.addChild(node=xmpp.simplexml.XML2Node("<body xmlns='http://www.w3.org/1999/xhtml'>" +
text.encode('utf-8') + "</body>"))
message.addChild(node=html)
except Exception:
message = xmpp.protocol.Message(body=text_plain)
return message
def presence_handler(self, conn, presence):
jid, ptype, status = presence.getFrom(), \
presence.getType(), \
presence.getStatus()
if self._jid.bareMatch(jid): return
try:
if jid in self._settings.master_names:
subscription = self.roster.getSubscription(str(jid))
else:
subscription = None
except KeyError:
# User not on our roster
subscription = None
if ptype == 'error': logging.error(presence.getError())
logging.debug("Presence for %s (type: %s, status: %s, subscription: %s)" %
(jid, ptype, status, subscription))
if (ptype == 'subscribe') and (jid in self._settings.master_names):
# Incoming presence subscription request
if subscription in ('to', 'both', 'from'):
self.roster.Authorize(jid)
self._conn.send(xmpp.dispatcher.Presence(show=None,
status=self.__status))
if subscription not in ('to', 'both'):
self.roster.Subscribe(jid)
if subscription in (None, 'none'):
self.send(jid, self.MSG_OK)
elif ptype == 'subscribed':
# Authorize any pending requests for that JID
self.roster.Authorize(jid)
elif ptype == 'unsubscribed':
# Authorization was not granted
self.send(jid, self.MSG_KO)
self.roster.Unauthorize(jid)
def __connect(self):
self._jid = xmpp.JID(self._settings.jid)
if not self._conn:
if not self._options.debug:
conn = xmpp.Client(self._jid.getDomain())
else:
conn = xmpp.Client(self._jid.getDomain(), debug=[])
res = conn.connect()
if not res:
logging.error("Unable to connect to server %s." %
self._jid.getDomain())
exit()
            if res != 'tls':
                logging.warning("Unable to establish TLS connection.")
res = conn.auth(self._jid.getNode(),
self._settings.password,
self._settings.chat_name)
if not res:
logging.error("Unable to authenticate this connection.")
exit()
            if res != 'sasl':
                logging.warning("Unable to get SASL credentials for: %s." %
                                self._jid.getDomain())
conn.RegisterHandler('message', self.message_handler)
conn.RegisterHandler('presence', self.presence_handler)
conn.sendInitPresence()
self.roster = conn.Roster.getRoster()
self._conn = conn
if hasattr(self._settings, 'default_status'):
self._conn.send(xmpp.Presence(status=self._settings.default_status))
if hasattr(self._settings, 'groupchat'): self.__join_muc()
def __join_muc(self):
for room_config in self._settings.groupchat:
if isinstance(room_config, dict):
for k, v in room_config.iteritems():
presence = xmpp.Presence(to="%s/%s" % (k, self._settings.chat_name))
presence.setTag('x', namespace='http://jabber.org/protocol/muc').setTagData('password',v)
else:
presence = xmpp.Presence(to="%s/%s" % (room_config, self._settings.chat_name))
self._conn.send(presence)
def __run(self):
retries = 0
while True:
try:
if not self._conn.isConnected():
logging.info('Bot not connected, reconnecting...')
self._conn.reconnectAndReauth()
self._conn.RegisterHandler('message', self.message_handler)
self._conn.RegisterHandler('presence', self.presence_handler)
self._conn.sendInitPresence()
self.roster = self._conn.Roster.getRoster()
if hasattr(self._settings, 'default_status'):
self._conn.send(xmpp.Presence(status=self._settings.default_status))
if hasattr(self._settings, 'groupchat'): self.__join_muc()
self._conn.Process(1)
except KeyboardInterrupt:
                logging.info('Signal caught, shutting down.')
break
            except Exception:
                logging.exception('Unexpected error')
if retries <= 3:
retries += 1
time.sleep(2)
continue
else:
break
logging.info('Exiting. Bye.')
exit()
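# A minimal, illustrative entry point (an assumption, not part of the original
# module): constructing Cylon() loads the configuration, connects to the XMPP
# server and enters the processing loop. The configuration file (default
# /etc/cylon.yml) is expected to provide at least: jid, password, chat_name,
# command_prefix, master_names, plugin_dir, loaded_plugins_at_start and
# loaded_hooks_at_start.
if __name__ == '__main__':
    Cylon()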
# -*- coding: utf-8 -*-
# Copyright (c) 2014, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
"""
Various utilities for imageio
"""
from __future__ import absolute_import, print_function, division
import re
import os
import sys
import time
import numpy as np
IS_PYPY = '__pypy__' in sys.builtin_module_names
# Taken from six.py
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
text_type = str
binary_type = bytes
else: # pragma: no cover
string_types = basestring, # noqa
text_type = unicode # noqa
binary_type = str
def urlopen(*args, **kwargs):
""" Compatibility function for the urlopen function. Raises an
RuntimeError if urlopen could not be imported (which can occur in
frozen applications.
"""
try:
from urllib2 import urlopen
except ImportError:
try:
from urllib.request import urlopen # Py3k
except ImportError:
raise RuntimeError('Could not import urlopen.')
return urlopen(*args, **kwargs)
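# A minimal usage sketch of the compatibility wrapper above. The helper name
# and URL handling are illustrative assumptions; network access is required
# at call time.
def fetch_bytes(url, timeout=10):
    """ Fetch the raw bytes behind a URL using the urlopen() wrapper. """
    remote = urlopen(url, timeout=timeout)
    try:
        return remote.read()
    finally:
        remote.close()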
# Currently not used ... its only use is to easily provide the global meta info
class ImageList(list):
def __init__(self, meta=None):
list.__init__(self)
# Check
if not (meta is None or isinstance(meta, dict)):
raise ValueError('ImageList expects meta data to be a dict.')
# Convert and return
self._meta = meta if meta is not None else {}
@property
def meta(self):
""" The dict with the meta data of this image.
"""
return self._meta
class Image(np.ndarray):
""" Image(array, meta=None)
A subclass of np.ndarray that has a meta attribute.
Following scikit-image, we leave this as a normal numpy array as much
as we can.
"""
def __new__(cls, array, meta=None):
# Check
if not isinstance(array, np.ndarray):
raise ValueError('Image expects a numpy array.')
if not (meta is None or isinstance(meta, dict)):
raise ValueError('Image expects meta data to be a dict.')
# Convert and return
meta = meta if meta is not None else {}
try:
ob = array.view(cls)
except AttributeError: # pragma: no cover
# Just return the original; no metadata on the array in Pypy!
return array
ob._copy_meta(meta)
return ob
def _copy_meta(self, meta):
""" Make a 2-level deep copy of the meta dictionary.
"""
self._meta = Dict()
for key, val in meta.items():
if isinstance(val, dict):
val = Dict(val) # Copy this level
self._meta[key] = val
@property
def meta(self):
""" The dict with the meta data of this image.
"""
return self._meta
def __array_finalize__(self, ob):
""" So the meta info is maintained when doing calculations with
the array.
"""
if isinstance(ob, Image):
self._copy_meta(ob.meta)
else:
self._copy_meta({})
def __array_wrap__(self, out, context=None):
""" So that we return a native numpy array (or scalar) when a
        reducing ufunc is applied (such as sum(), std(), etc.)
"""
if not out.shape:
return out.dtype.type(out) # Scalar
elif out.shape != self.shape:
return np.asarray(out)
else:
return out # Type Image
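# Illustrative sketch (not part of imageio itself): the Image subclass keeps
# its meta dict through elementwise operations, while reducing ufuncs fall
# back to plain numpy types, as implemented by __array_wrap__ above.
def _image_meta_example():
    im = Image(np.zeros((4, 4), dtype='float32'), meta={'source': 'example'})
    doubled = im * 2                      # still an Image; meta is preserved
    assert isinstance(doubled, Image)
    assert doubled.meta['source'] == 'example'
    total = im.sum()                      # reduction -> plain numpy scalar
    assert not isinstance(total, Image)
    return doubled, total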
try:
from collections import OrderedDict as _dict
except ImportError:
_dict = dict
class Dict(_dict):
""" A dict in which the keys can be get and set as if they were
attributes. Very convenient in combination with autocompletion.
This Dict still behaves as much as possible as a normal dict, and
keys can be anything that are otherwise valid keys. However,
keys that are not valid identifiers or that are names of the dict
class (such as 'items' and 'copy') cannot be get/set as attributes.
"""
__reserved_names__ = dir(_dict()) # Also from OrderedDict
__pure_names__ = dir(dict())
def __getattribute__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in self:
return self[key]
else:
raise
def __setattr__(self, key, val):
if key in Dict.__reserved_names__:
# Either let OrderedDict do its work, or disallow
if key not in Dict.__pure_names__:
return _dict.__setattr__(self, key, val)
else:
raise AttributeError('Reserved name, this key can only ' +
'be set via ``d[%r] = X``' % key)
else:
# if isinstance(val, dict): val = Dict(val) -> no, makes a copy!
self[key] = val
def __dir__(self):
isidentifier = lambda x: bool(re.match(r'[a-z_]\w*$', x, re.I))
names = [k for k in self.keys() if
(isinstance(k, string_types) and isidentifier(k))]
return Dict.__reserved_names__ + names
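# Illustrative sketch (not part of imageio itself): keys of a Dict that are
# valid identifiers can be read and written as attributes, while reserved
# dict method names must go through item access.
def _dict_example():
    d = Dict()
    d.width = 640                 # equivalent to d['width'] = 640
    d['height'] = 480
    assert (d.width, d.height) == (640, 480)
    try:
        d.copy = 'nope'           # reserved dict name -> AttributeError
    except AttributeError:
        pass
    return d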
class BaseProgressIndicator:
""" BaseProgressIndicator(name)
    A progress indicator helps display the progress of a task to the
user. Progress can be pending, running, finished or failed.
Each task has:
* a name - a short description of what needs to be done.
* an action - the current action in performing the task (e.g. a subtask)
* progress - how far the task is completed
* max - max number of progress units. If 0, the progress is indefinite
* unit - the units in which the progress is counted
* status - 0: pending, 1: in progress, 2: finished, 3: failed
This class defines an abstract interface. Subclasses should implement
_start, _stop, _update_progress(progressText), _write(message).
"""
def __init__(self, name):
self._name = name
self._action = ''
self._unit = ''
self._max = 0
self._status = 0
self._last_progress_update = 0
def start(self, action='', unit='', max=0):
""" start(action='', unit='', max=0)
Start the progress. Optionally specify an action, a unit,
        and a maximum progress value.
"""
if self._status == 1:
self.finish()
self._action = action
self._unit = unit
self._max = max
#
self._progress = 0
self._status = 1
self._start()
def status(self):
""" status()
Get the status of the progress - 0: pending, 1: in progress,
2: finished, 3: failed
"""
return self._status
def set_progress(self, progress=0, force=False):
""" set_progress(progress=0, force=False)
Set the current progress. To avoid unnecessary progress updates
this will only have a visual effect if the time since the last
update is > 0.1 seconds, or if force is True.
"""
self._progress = progress
# Update or not?
if not (force or (time.time() - self._last_progress_update > 0.1)):
return
self._last_progress_update = time.time()
# Compose new string
unit = self._unit or ''
progressText = ''
if unit == '%':
progressText = '%2.1f%%' % progress
elif self._max > 0:
percent = 100 * float(progress) / self._max
progressText = '%i/%i %s (%2.1f%%)' % (progress, self._max, unit,
percent)
elif progress > 0:
if isinstance(progress, float):
progressText = '%0.4g %s' % (progress, unit)
else:
progressText = '%i %s' % (progress, unit)
# Update
self._update_progress(progressText)
def increase_progress(self, extra_progress):
""" increase_progress(extra_progress)
Increase the progress by a certain amount.
"""
self.set_progress(self._progress + extra_progress)
def finish(self, message=None):
""" finish(message=None)
Finish the progress, optionally specifying a message. This will
not set the progress to the maximum.
"""
        self.set_progress(self._progress, True)  # force the update
self._status = 2
self._stop()
if message is not None:
self._write(message)
def fail(self, message=None):
""" fail(message=None)
Stop the progress with a failure, optionally specifying a message.
"""
        self.set_progress(self._progress, True)  # force the update
self._status = 3
self._stop()
message = 'FAIL ' + (message or '')
self._write(message)
def write(self, message):
""" write(message)
Write a message during progress (such as a warning).
"""
if self.__class__ == BaseProgressIndicator:
# When this class is used as a dummy, print explicit message
print(message)
else:
return self._write(message)
# Implementing classes should implement these
def _start(self):
pass
def _stop(self):
pass
def _update_progress(self, progressText):
pass
def _write(self, message):
pass
class StdoutProgressIndicator(BaseProgressIndicator):
""" StdoutProgressIndicator(name)
A progress indicator that shows the progress in stdout. It
assumes that the tty can appropriately deal with backspace
characters.
"""
def _start(self):
self._chars_prefix, self._chars = '', ''
# Write message
if self._action:
self._chars_prefix = '%s (%s): ' % (self._name, self._action)
else:
self._chars_prefix = '%s: ' % self._name
sys.stdout.write(self._chars_prefix)
sys.stdout.flush()
def _update_progress(self, progressText):
# If progress is unknown, at least make something move
if not progressText:
i1, i2, i3, i4 = '-\\|/'
M = {i1: i2, i2: i3, i3: i4, i4: i1}
progressText = M.get(self._chars, i1)
# Store new string and write
delChars = '\b'*len(self._chars)
self._chars = progressText
sys.stdout.write(delChars+self._chars)
sys.stdout.flush()
def _stop(self):
self._chars = self._chars_prefix = ''
sys.stdout.write('\n')
sys.stdout.flush()
def _write(self, message):
# Write message
delChars = '\b'*len(self._chars_prefix+self._chars)
sys.stdout.write(delChars+' '+message+'\n')
# Reprint progress text
sys.stdout.write(self._chars_prefix+self._chars)
sys.stdout.flush()
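# Illustrative sketch (not part of imageio itself) of driving the progress
# API defined above from a simple loop.
def _progress_example(n=50):
    progress = StdoutProgressIndicator('Downloading')
    progress.start('fetching chunks', unit='%', max=100)
    for i in range(n):
        time.sleep(0.01)                       # stand-in for real work
        progress.set_progress(100.0 * (i + 1) / n)
    progress.finish('done')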
# From pyzolib/paths.py (https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py)
def appdata_dir(appname=None, roaming=False):
""" appdata_dir(appname=None, roaming=False)
Get the path to the application directory, where applications are allowed
to write user specific files (e.g. configurations). For non-user specific
data, consider using common_appdata_dir().
If appname is given, a subdir is appended (and created if necessary).
If roaming is True, will prefer a roaming directory (Windows Vista/7).
"""
# Define default user directory
userDir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = (path2 or path1) if roaming else (path1 or path2)
elif sys.platform.startswith('darwin'):
path = os.path.join(userDir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = userDir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None):
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath): # pragma: no cover
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
if appname:
if path == userDir:
appname = '.' + appname.lstrip('.') # Make it a hidden directory
path = os.path.join(path, appname)
if not os.path.isdir(path): # pragma: no cover
os.mkdir(path)
# Done
return path
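# Illustrative sketch (not part of imageio itself): resolving a per-user
# directory for a hypothetical application name and writing a small settings
# file into it.
def _appdata_example():
    config_dir = appdata_dir('imageio_demo')        # hypothetical app name
    config_file = os.path.join(config_dir, 'settings.txt')
    with open(config_file, 'w') as f:
        f.write('cache_size=128\n')
    return config_file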
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# Copyright 2014 Steve Huang
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Author: Steve Huang ([email protected])
Initial Date: July 2014
Module Description:
TBD
"""
import os
import re
import string
import inspect
import common.log as logging
import ConfigParser
import argparse
from types import *
logger = logging.getLogger()
def _handle_file_(x):
if not os.path.isfile(x):
return open(x, 'w')
else:
        return open(x, 'r+')
# to use this module, please use
# from Parameters import *
class Opt (object):
def __init__ (self, group='app', name=None, short=None, default=None, help=None, sub_group=''):
self.group = group
self.name = name
self.short = short
self.value = default
self.help = help
self.type = None
self.sub_group = sub_group
def add_opt_to_cli (self, parser):
        cli_name = self.name
        namelist = list()
        if (self.short is not None):
            namelist = ["-" + self.short, "--" + cli_name]
        else:
            namelist.append("--" + cli_name)
kwargs = { 'type' : self.type,
'help' : self.help,
'default': self._get_(),
'nargs': '?'
}
if self.type == ListType:
kwargs['type']=StringType
kwargs['nargs']='+'
if self.type == FileType:
kwargs['type']=_handle_file_
parser.add_argument(*namelist, **kwargs)
def _get_ (self):
return None
def _set_ (self, value):
if type(value) == self.type:
self.value = value
else:
raise TypeError("Expected " + str(self.type))
class DirOpt(Opt):
def __init__ (self, group='app', name=None, short=None, default=None, help=None, sub_group=''):
default = os.path.expanduser(default) if default is not None else None
default = os.path.expandvars(default) if default is not None else None
#os.path.isdir(default)
super(DirOpt,self).__init__(group, name, short, default, help, sub_group)
self.type = StringType
def _get_ (self):
return self.value
class FileOpt(Opt):
def __init__ (self, group='app', name=None, short=None, default=None, help=None, sub_group=''):
default = os.path.expanduser(default) if default is not None else None
default = os.path.expandvars(default) if default is not None else None
super(FileOpt,self).__init__(group, name, short, default, help, sub_group)
self.type = FileType
def _get_ (self):
if self.value is not None:
if type(self.value) is FileType:
return self.value.name
else:
return self.value
return None
def _set_ (self, value):
try:
super(FileOpt, self)._set_(value)
except TypeError as e:
if type(value) == StringType:
self.value = FileType(value)
else:
raise TypeError("Expected " + str(self.type))
class IntOpt(Opt):
def __init__ (self, group='app', name=None, short=None, default=None, help=None, sub_group=''):
super(IntOpt,self).__init__(group, name, short, default, help, sub_group)
self.type = IntType
def _get_ (self):
return self.value
class StrOpt(Opt):
def __init__ (self, group='app', name=None, short=None, default=None, help=None, sub_group=''):
super(StrOpt,self).__init__(group, name, short, default, help, sub_group)
self.type = StringType
def _get_ (self):
return self.value
class ListOpt(Opt):
def __init__ (self, group='app', name=None, short=None, default=None, help=None, sub_group=''):
super(ListOpt,self).__init__(group, name, short, default, help, sub_group)
self.type = ListType
def _get_ (self):
return self.value
class ConfigOptions:
def __init__(self):
# dictionary with (group,name) as the key
# each item is an option (class Opt)
self.groupOpts = dict()
#self.args = list()
self.parser = dict()
self.parser['app'] = argparse.ArgumentParser(description='options for GarageEye program', add_help=False)
self.parser['app'].add_argument('-h', '--help', action='store_true', help='show this help message and exit', default=False)
def __getattr__(self, name):
for key, opt in self.groupOpts.iteritems():
if name in key:
return opt._get_()
return None
def _add_opts_to_cli_list_ (self):
# create a parser cli list from the groupOpts list
for key,opt in self.groupOpts.iteritems():
opt.add_opt_to_cli(self.parser[key[0]])
def _parse_config_files_ (self, filename):
# parse
ini_parser = ConfigParser.ConfigParser()
ini_parser.read(filename)
args = dict()
for section in ini_parser.sections():
items = ini_parser.items(section)
for name, value in items:
if not section in args:
args[section] = list()
args[section].append('--' + name)
args[section].extend(value.split())
ini_parser = None
return args
def parseArgs (self, args=None, config_files=None, validate_values=False):
        # Build a list of the CLI options available from the current option
        # set. If an argument names a config file, parse that file and merge
        # its values into the per-group argument lists. User and environment
        # variables are expanded in all string values.
if len(args)==0:
return None
cli_args = dict()
temp_args = list()
self._add_opts_to_cli_list_() # fill in the parsers based on opts
consume_conf = False
for index, arg in enumerate(args):
if arg == '--config_file' or arg.startswith('--config_file') or arg == '-c':
consume_conf=True
continue
if consume_conf==True and arg.endswith('.conf'):
items = self._parse_config_files_(args[index])
for groupname in items:
items[groupname] = [os.path.expanduser(x) for x in items[groupname]]
items[groupname] = [os.path.expandvars(x) for x in items[groupname]]
if not groupname in cli_args:
cli_args[groupname] = list()
cli_args[groupname].extend(items[groupname])
consume_conf=False
elif consume_conf==False:
val = os.path.expanduser(args[index])
val = os.path.expandvars(val)
temp_args.append(val)
else: # this means the conf file is not the right extension
logger.warning("Expected file with conf extension. File not parsed.")
# split the arguments into the groups
group_found = False
group_Name = 'app'
for arg in temp_args:
if arg=="--group" or arg.startswith('--group') or arg=='-g':
group_found = True
print "group found"
continue
if group_found == True:
group_Name = arg
print "group name set to " + group_Name
group_found = False
continue
if not group_Name in cli_args:
cli_args[group_Name] = list()
cli_args[group_Name].append(arg)
# parse all the parameters
known_args = dict()
unknown_args = dict()
for groupname in self.parser:
if groupname in cli_args:
known, unknown = self.parser[groupname].parse_known_args(cli_args[groupname])
if not groupname in known_args:
known_args[groupname] = known
unknown_args[groupname] = unknown
else:
known_args[groupname].update(known)
unknown_args[groupname].update(unknown)
# if help was called, print out
if known_args['app'].help:
for groupname in self.parser:
self.parser[groupname].print_help()
self.parser['app'].exit()
#synch the namespace values with groupOpts
for groupname in known_args:
for name, value in vars(known_args[groupname]).iteritems():
if self.groupOpts.has_key((groupname,name)):
self.groupOpts[(groupname,name)]._set_(value)
else:
if (name != 'help'):
logger.warning("Missing key pair (%s, %s)" % (name,value)) # 'help' will not be here. it's expected
#print ("Missing key pair (%s, %s)" % (name,value))
return known_args
def _log_options_ (self):
        logger.debug('Dumping registered configuration options')
for key, opt in self.groupOpts.iteritems():
logger.debug("%s = %s" % (key, str(opt._get_())))
def registerOpt (self, options=None):
if (options is None):
raise Exception('options is set to None')
if (len(options)==0):
raise Exception('nothing in options list')
if type(options) is list:
for opt in options:
# set the opt.module to the caller's __name__
#frm = inspect.stack()[1]
#mod = inspect.getmodule(frm[0])
#opt.module = mod.__module__
#logger.debug ("opt.module = %s" % mod)
# check if the opt.name in the group exists.
groupname = opt.group
if (groupname,opt.name) in self.groupOpts:
logger.warning("The name %s already exists. Option will be overwritten." % opt.name)
self.groupOpts[(groupname,opt.name)] = opt
if not (groupname in self.parser):
self.parser[groupname] = argparse.ArgumentParser(description='options for the group ' + groupname, add_help=False)
else:
# the group does not exist. Add the first (key, item) pair
self.groupOpts[groupname, opt.name]=opt
if not (groupname in self.parser):
self.parser[groupname] = argparse.ArgumentParser(description='options for the group '+ groupname, add_help=False)
#print self.groupOpts.items()
def importOpt (self, module, name, group='app'):
# check if the (group,name) pair exists in the dictionary
__import__(module)
if (group, name) in self.groupOpts:
return self.groupOpts[(group, name)]._get_()
else:
raise ValueError("Missing pair (%s, %s) " % (group, name))
return None
def get_opt_list (self, group='app'):
rval = []
for key, opt in self.groupOpts.iteritems():
if group in key:
rval.append(key[1])
return rval
Config = ConfigOptions()
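# Illustrative sketch (an assumption, not part of this module): register a
# couple of options, parse CLI-style arguments and read the resolved values
# back through the global Config object. Group and option names below are
# made up for the example.
def _config_example():
    opts = [
        StrOpt(group='camera', name='device', short='d',
               default='/dev/video0', help='capture device'),
        IntOpt(name='fps', default=15, help='frames per second'),  # 'app' group
    ]
    Config.registerOpt(opts)
    Config.parseArgs(['--fps', '30', '--group', 'camera',
                      '--device', '/dev/video1'])
    # Values resolve via ConfigOptions.__getattr__ on (group, name) pairs.
    return Config.device, Config.fps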
# -*- coding: utf-8 -*-
"""
Defines the fields that can be added to redisco models.
"""
import time
import sys
from datetime import datetime, date, timedelta
from dateutil.tz import tzutc, tzlocal
from calendar import timegm
from redisco.containers import List
from .exceptions import FieldValidationError, MissingID
__all__ = ['Attribute', 'CharField', 'ListField', 'DateTimeField',
'DateField', 'TimeDeltaField', 'ReferenceField', 'Collection',
'IntegerField', 'FloatField', 'BooleanField', 'Counter',
'ZINDEXABLE']
class Attribute(object):
"""Defines an attribute of the model.
    The attribute accepts strings, which are stored in Redis exactly as
    they are given - as strings.
Options
name -- alternate name of the attribute. This will be used
as the key to use when interacting with Redis.
indexed -- Index this attribute. Unindexed attributes cannot
be used in queries. Default: True.
unique -- validates the uniqueness of the value of the
attribute.
validator -- a callable that can validate the value of the
attribute.
default -- Initial value of the attribute.
"""
def __init__(self,
name=None,
indexed=True,
required=False,
validator=None,
unique=False,
default=None):
self.name = name
self.indexed = indexed
self.required = required
self.validator = validator
self.default = default
self.unique = unique
def __get__(self, instance, owner):
try:
return getattr(instance, '_' + self.name)
except AttributeError:
if callable(self.default):
default = self.default()
else:
default = self.default
self.__set__(instance, default)
return default
def __set__(self, instance, value):
setattr(instance, '_' + self.name, value)
def typecast_for_read(self, value):
"""Typecasts the value for reading from Redis."""
# The redis client encodes all unicode data to utf-8 by default.
return value.decode('utf-8')
def typecast_for_storage(self, value):
"""Typecasts the value for storing to Redis."""
try:
return unicode(value)
except UnicodeError:
return value.decode('utf-8')
def value_type(self):
return unicode
def acceptable_types(self):
return basestring
def validate(self, instance):
val = getattr(instance, self.name)
errors = []
# type_validation
if val is not None and not isinstance(val, self.acceptable_types()):
errors.append((self.name, 'bad type',))
# validate first standard stuff
if self.required:
if val is None or not unicode(val).strip():
errors.append((self.name, 'required'))
# validate uniquness
if val and self.unique:
error = self.validate_uniqueness(instance, val)
if error:
errors.append(error)
# validate using validator
if self.validator:
r = self.validator(self.name, val)
if r:
errors.extend(r)
if errors:
raise FieldValidationError(errors)
def validate_uniqueness(self, instance, val):
encoded = self.typecast_for_storage(val)
matches = instance.__class__.objects.filter(**{self.name: encoded})
if len(matches) > 0:
try:
instance_id = instance.id
no_id = False
except MissingID:
no_id = True
if (len(matches) != 1) or no_id or (matches.first().id != instance.id):
return (self.name, 'not unique',)
class CharField(Attribute):
def __init__(self, max_length=255, **kwargs):
super(CharField, self).__init__(**kwargs)
self.max_length = max_length
def validate(self, instance):
errors = []
try:
super(CharField, self).validate(instance)
except FieldValidationError as err:
errors.extend(err.errors)
val = getattr(instance, self.name)
if val and len(val) > self.max_length:
errors.append((self.name, 'exceeds max length'))
if errors:
raise FieldValidationError(errors)
class BooleanField(Attribute):
def typecast_for_read(self, value):
return bool(int(value))
def typecast_for_storage(self, value):
if value is None:
return "0"
return "1" if value else "0"
def value_type(self):
return bool
def acceptable_types(self):
return self.value_type()
class IntegerField(Attribute):
def typecast_for_read(self, value):
return int(value)
def typecast_for_storage(self, value):
if value is None:
return "0"
return unicode(value)
def value_type(self):
return int
def acceptable_types(self):
return (int, long)
class FloatField(Attribute):
def typecast_for_read(self, value):
return float(value)
def typecast_for_storage(self, value):
if value is None:
return "0"
return "%f" % value
def value_type(self):
return float
def acceptable_types(self):
return self.value_type()
class DateTimeField(Attribute):
def __init__(self, auto_now=False, auto_now_add=False, **kwargs):
super(DateTimeField, self).__init__(**kwargs)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
def typecast_for_read(self, value):
try:
            # We load as if the timestamp were naive
            dt = datetime.fromtimestamp(float(value), tzutc())
            # And gently override (i.e. not convert) the TZ to UTC
return dt
except TypeError:
return None
except ValueError:
return None
def typecast_for_storage(self, value):
if not isinstance(value, datetime):
raise TypeError("%s should be datetime object, and not a %s" %
(self.name, type(value)))
if value is None:
return None
        # Are we timezone aware? If not, assume the local timezone.
if value.tzinfo is None:
value = value.replace(tzinfo=tzlocal())
return "%d.%06d" % (float(timegm(value.utctimetuple())), value.microsecond)
def value_type(self):
return datetime
def acceptable_types(self):
return self.value_type()
class DateField(Attribute):
def __init__(self, auto_now=False, auto_now_add=False, **kwargs):
super(DateField, self).__init__(**kwargs)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
def typecast_for_read(self, value):
try:
# We load as if it is UTC time
dt = date.fromtimestamp(float(value))
            # And assign (i.e. not convert) the UTC timezone
return dt
except TypeError:
return None
except ValueError:
return None
def typecast_for_storage(self, value):
if not isinstance(value, date):
raise TypeError("%s should be date object, and not a %s" %
(self.name, type(value)))
if value is None:
return None
return "%d" % float(timegm(value.timetuple()))
def value_type(self):
return date
def acceptable_types(self):
return self.value_type()
class TimeDeltaField(Attribute):
def __init__(self, **kwargs):
super(TimeDeltaField, self).__init__(**kwargs)
    if hasattr(timedelta, "total_seconds"):
        def _total_seconds(self, td):
            return td.total_seconds()
    else:
        def _total_seconds(self, td):
            return (td.microseconds + 0.0 +
                    (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6
def typecast_for_read(self, value):
try:
            # Load the stored value (in seconds) into a timedelta; None means 0
if value is None:
value = 0.
td = timedelta(seconds=float(value))
return td
except TypeError:
return None
except ValueError:
return None
def typecast_for_storage(self, value):
if not isinstance(value, timedelta):
raise TypeError("%s should be timedelta object, and not a %s" %
(self.name, type(value)))
if value is None:
return None
return "%d" % self._total_seconds(value)
def value_type(self):
return timedelta
def acceptable_types(self):
return self.value_type()
class ListField(object):
"""Stores a list of objects.
target_type -- can be a Python object or a redisco model class.
    If target_type is not a redisco model class, the target_type should
    also be a callable that casts the (string) value of a list element
    into target_type, e.g. str, unicode, int, float.
ListField also accepts a string that refers to a redisco model.
"""
def __init__(self, target_type,
name=None,
indexed=True,
required=False,
validator=None,
default=None):
self._target_type = target_type
self.name = name
self.indexed = indexed
self.required = required
self.validator = validator
self.default = default or []
        from .base import Model
self._redisco_model = (isinstance(target_type, basestring) or
issubclass(target_type, Model))
def __get__(self, instance, owner):
try:
return getattr(instance, '_' + self.name)
except AttributeError:
if instance.is_new():
val = self.default
else:
key = instance.key()[self.name]
val = List(key).members
if val is not None:
klass = self.value_type()
if self._redisco_model:
val = filter(lambda o: o is not None, [klass.objects.get_by_id(v) for v in val])
else:
val = [klass(v) for v in val]
self.__set__(instance, val)
return val
def __set__(self, instance, value):
setattr(instance, '_' + self.name, value)
def value_type(self):
if isinstance(self._target_type, basestring):
t = self._target_type
            from .base import get_model_from_key
self._target_type = get_model_from_key(self._target_type)
if self._target_type is None:
raise ValueError("Unknown Redisco class %s" % t)
return self._target_type
def validate(self, instance):
val = getattr(instance, self.name)
errors = []
if val:
if not isinstance(val, list):
errors.append((self.name, 'bad type'))
else:
for item in val:
if not isinstance(item, self.value_type()):
errors.append((self.name, 'bad type in list'))
# validate first standard stuff
if self.required:
if not val:
errors.append((self.name, 'required'))
# validate using validator
if self.validator:
r = self.validator(val)
if r:
errors.extend(r)
if errors:
raise FieldValidationError(errors)
class Collection(object):
"""
A simple container that will be replaced by the good imports
and the good filter query.
"""
def __init__(self, target_type):
self.target_type = target_type
def __get__(self, instance, owner):
if not isinstance(self.target_type, str):
raise TypeError("A collection only accepts a string representing the Class")
# __import__ should be something like __import__('mymod.mysubmod', fromlist=['MyClass'])
klass_path = self.target_type.split(".")
fromlist = klass_path[-1]
frompath = ".".join(klass_path[0:-1])
        # If the path is not empty it is worth importing the class; otherwise
        # it is a local class that has already been imported.
if frompath:
mod = __import__(frompath, fromlist=[fromlist])
else:
mod = sys.modules[__name__]
klass = getattr(mod, fromlist)
return klass.objects.filter(**{instance.__class__.__name__.lower() + '_id': instance.id})
def __set__(self, instance, value):
"""
        Prevent the attribute from being overridden.
"""
raise AttributeError("can't override a collection of object")
class ReferenceField(object):
def __init__(self,
target_type,
name=None,
attname=None,
indexed=True,
required=False,
related_name=None,
default=None,
validator=None):
self._target_type = target_type
self.name = name
self.indexed = indexed
self.required = required
self._attname = attname
self._related_name = related_name
self.validator = validator
self.default = default
def __set__(self, instance, value):
"""
Will set the referenced object unless None is provided
which will simply remove the reference
"""
if not isinstance(value, self.value_type()) and \
value is not None:
raise TypeError
# remove the cached value from the instance
if hasattr(instance, '_' + self.name):
delattr(instance, '_' + self.name)
# Remove the attribute_id reference
setattr(instance, self.attname, None)
# Set it to the new value if any.
if value is not None:
setattr(instance, self.attname, value.id)
def __get__(self, instance, owner):
try:
if not hasattr(instance, '_' + self.name):
o = self.value_type().objects.get_by_id(
getattr(instance, self.attname))
setattr(instance, '_' + self.name, o)
return getattr(instance, '_' + self.name)
except AttributeError:
setattr(instance, '_' + self.name, self.default)
return self.default
def value_type(self):
return self._target_type
@property
def attname(self):
if self._attname is None:
self._attname = self.name + '_id'
return self._attname
@property
def related_name(self):
return self._related_name
def validate(self, instance):
val = getattr(instance, self.name)
errors = []
if val:
if not isinstance(val, self.value_type()):
errors.append((self.name, 'bad type for reference'))
# validate first standard stuff
if self.required:
if not val:
errors.append((self.name, 'required'))
# validate using validator
if self.validator:
r = self.validator(val)
if r:
errors.extend(r)
if errors:
raise FieldValidationError(errors)
class Counter(IntegerField):
def __init__(self, **kwargs):
super(Counter, self).__init__(**kwargs)
if not kwargs.has_key('default') or self.default is None:
self.default = 0
def __set__(self, instance, value):
raise AttributeError("can't set a counter.")
def __get__(self, instance, owner):
if not instance.is_new():
v = instance.db.hget(instance.key(), self.name)
if v is None:
return 0
return int(v)
else:
return 0
ZINDEXABLE = (IntegerField, DateTimeField, DateField, FloatField, Counter)
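# Illustrative sketch (an assumption, not part of this module): defining and
# using a model built from the field types above. It relies on the redisco
# ``Model`` base class from the surrounding package and on a running Redis
# server.
def _fields_usage_example():
    from redisco import models

    class Article(models.Model):
        title = models.CharField(max_length=100, required=True, indexed=True)
        published = models.BooleanField(default=False)
        rating = models.FloatField(default=0.0)
        created_at = models.DateTimeField(auto_now_add=True)
        views = models.Counter()

    article = Article(title=u"Hello redisco")
    article.save()                 # validates the fields, then persists
    return Article.objects.filter(title=u"Hello redisco").first()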
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``flocker.provision._install``.
"""
import yaml
from twisted.trial.unittest import SynchronousTestCase
from pyrsistent import freeze, thaw
from textwrap import dedent
from .._install import (
task_configure_flocker_agent,
task_enable_flocker_agent,
run, put, run_from_args,
get_repository_url, UnsupportedDistribution, get_installable_version,
get_repo_options,
_remove_dataset_fields, _remove_private_key,
)
from .._ssh import Put
from .._effect import sequence
from ...acceptance.testtools import DatasetBackend
from ... import __version__ as flocker_version
THE_AGENT_YML_PATH = b"/etc/flocker/agent.yml"
BASIC_AGENT_YML = freeze({
"version": 1,
"control-service": {
"hostname": "192.0.2.42",
"port": 4524,
},
"dataset": {
"backend": "zfs",
},
})
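# For reference, the frozen mapping above corresponds to an agent.yml of
# roughly this shape (as rendered by yaml.safe_dump; key order may differ):
#
#   version: 1
#   control-service:
#     hostname: 192.0.2.42
#     port: 4524
#   dataset:
#     backend: zfs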
class ConfigureFlockerAgentTests(SynchronousTestCase):
"""
Tests for ``task_configure_flocker_agent``.
"""
def test_agent_yml(self):
"""
        ``task_configure_flocker_agent`` writes a ``/etc/flocker/agent.yml``
file which contains the backend configuration passed to it.
"""
control_address = BASIC_AGENT_YML["control-service"]["hostname"]
expected_pool = u"some-test-pool"
expected_backend_configuration = dict(pool=expected_pool)
commands = task_configure_flocker_agent(
control_node=control_address,
dataset_backend=DatasetBackend.lookupByName(
BASIC_AGENT_YML["dataset"]["backend"]
),
dataset_backend_configuration=expected_backend_configuration,
)
[put_agent_yml] = list(
effect.intent
for effect in
commands.intent.effects
if isinstance(effect.intent, Put)
)
# Seems like transform should be usable here but I don't know how.
expected_agent_config = BASIC_AGENT_YML.set(
"dataset",
BASIC_AGENT_YML["dataset"].update(expected_backend_configuration)
)
self.assertEqual(
put(
content=yaml.safe_dump(thaw(expected_agent_config)),
path=THE_AGENT_YML_PATH,
log_content_filter=_remove_dataset_fields,
).intent,
put_agent_yml,
)
class EnableFlockerAgentTests(SynchronousTestCase):
"""
Tests for ``task_enable_flocker_agent``.
"""
def test_centos_sequence(self):
"""
``task_enable_flocker_agent`` for the 'centos-7' distribution returns
a sequence of systemctl enable and restart commands for each agent.
"""
distribution = u"centos-7"
commands = task_enable_flocker_agent(
distribution=distribution,
)
expected_sequence = sequence([
run(command="systemctl enable flocker-dataset-agent"),
run(command="systemctl restart flocker-dataset-agent"),
run(command="systemctl enable flocker-container-agent"),
run(command="systemctl restart flocker-container-agent"),
])
self.assertEqual(commands, expected_sequence)
def test_ubuntu_sequence(self):
"""
``task_enable_flocker_agent`` for the 'ubuntu-14.04' distribution
returns a sequence of 'service start' commands for each agent.
"""
distribution = u"ubuntu-14.04"
commands = task_enable_flocker_agent(
distribution=distribution,
)
expected_sequence = sequence([
run(command="service flocker-dataset-agent start"),
run(command="service flocker-container-agent start"),
])
self.assertEqual(commands, expected_sequence)
def _centos7_install_commands(version):
"""
Construct the command sequence expected for installing Flocker on CentOS 7.
:param str version: A Flocker native OS package version (a package name
suffix) like ``"-1.2.3-1"``.
:return: The sequence of commands expected for installing Flocker on
CentOS7.
"""
installable_version = get_installable_version(flocker_version)
return sequence([
run(command="yum clean all"),
run(command="yum install -y {}".format(get_repository_url(
distribution='centos-7',
flocker_version=installable_version,
))),
run_from_args(
['yum', 'install'] + get_repo_options(installable_version) +
['-y', 'clusterhq-flocker-node' + version])
])
class GetRepoOptionsTests(SynchronousTestCase):
"""
Tests for ``get_repo_options``.
"""
def test_marketing_release(self):
"""
No extra repositories are enabled if the latest installable version
is a marketing release.
"""
self.assertEqual(get_repo_options(flocker_version='0.3.0'), [])
def test_development_release(self):
"""
        A testing repository is enabled if the latest installable
        version is not a marketing release.
"""
self.assertEqual(
get_repo_options(flocker_version='0.3.0.dev1'),
['--enablerepo=clusterhq-testing'])
class GetRepositoryURLTests(SynchronousTestCase):
"""
Tests for ``get_repository_url``.
"""
def test_centos_7(self):
"""
It is possible to get a repository URL for CentOS 7 packages.
"""
expected = ("https://clusterhq-archive.s3.amazonaws.com/centos/"
"clusterhq-release$(rpm -E %dist).noarch.rpm")
self.assertEqual(
get_repository_url(
distribution='centos-7',
flocker_version='0.3.0'),
expected
)
def test_ubuntu_14_04(self):
"""
It is possible to get a repository URL for Ubuntu 14.04 packages.
"""
expected = ("https://clusterhq-archive.s3.amazonaws.com/ubuntu/"
"$(lsb_release --release --short)/\\$(ARCH)")
self.assertEqual(
get_repository_url(
distribution='ubuntu-14.04',
flocker_version='0.3.0'),
expected
)
def test_ubuntu_15_04(self):
"""
It is possible to get a repository URL for Ubuntu 15.04 packages.
"""
expected = ("https://clusterhq-archive.s3.amazonaws.com/ubuntu/"
"$(lsb_release --release --short)/\\$(ARCH)")
self.assertEqual(
get_repository_url(
distribution='ubuntu-15.04',
flocker_version='0.3.0'),
expected
)
def test_unsupported_distribution(self):
"""
An ``UnsupportedDistribution`` error is thrown if a repository for the
desired distribution cannot be found.
"""
self.assertRaises(
UnsupportedDistribution,
get_repository_url, 'unsupported-os', '0.3.0',
)
def test_non_release_ubuntu(self):
"""
The operating system key for ubuntu has the suffix ``-testing`` for
non-marketing releases.
"""
expected = ("https://clusterhq-archive.s3.amazonaws.com/"
"ubuntu-testing/"
"$(lsb_release --release --short)/\\$(ARCH)")
self.assertEqual(
get_repository_url(
distribution='ubuntu-14.04',
flocker_version='0.3.0.dev1'),
expected
)
def test_non_release_centos(self):
"""
        The operating system key for centos stays the same for
        non-marketing releases.
"""
expected = ("https://clusterhq-archive.s3.amazonaws.com/centos/"
"clusterhq-release$(rpm -E %dist).noarch.rpm")
self.assertEqual(
get_repository_url(
distribution='centos-7',
flocker_version='0.3.0.dev1'),
expected
)
class PrivateKeyLoggingTest(SynchronousTestCase):
"""
Test removal of private keys from logs.
"""
def test_private_key_removed(self):
"""
A private key is removed for logging.
"""
key = dedent('''
-----BEGIN PRIVATE KEY-----
MFDkDKSLDDSf
MFSENSITIVED
MDKODSFJOEWe
-----END PRIVATE KEY-----
''')
self.assertEqual(
dedent('''
-----BEGIN PRIVATE KEY-----
MFDk...REMOVED...OEWe
-----END PRIVATE KEY-----
'''),
_remove_private_key(key))
def test_non_key_kept(self):
"""
Non-key data is kept for logging.
"""
key = 'some random data, not a key'
self.assertEqual(key, _remove_private_key(key))
def test_short_key_kept(self):
"""
A key that is suspiciously short is kept for logging.
"""
key = dedent('''
-----BEGIN PRIVATE KEY-----
short
-----END PRIVATE KEY-----
''')
self.assertEqual(key, _remove_private_key(key))
def test_no_end_key_removed(self):
"""
A missing end tag does not prevent removal working.
"""
key = dedent('''
-----BEGIN PRIVATE KEY-----
MFDkDKSLDDSf
MFSENSITIVED
MDKODSFJOEWe
''')
self.assertEqual(
'\n-----BEGIN PRIVATE KEY-----\nMFDk...REMOVED...OEWe\n',
_remove_private_key(key))
class DatasetLoggingTest(SynchronousTestCase):
"""
Test removal of sensitive information from logged configuration files.
"""
def test_dataset_logged_safely(self):
"""
Values are either the same or replaced by 'REMOVED'.
"""
config = {
'dataset': {
'secret': 'SENSITIVE',
'zone': 'keep'
}
}
content = yaml.safe_dump(config)
logged = _remove_dataset_fields(content)
self.assertEqual(
yaml.safe_load(logged),
{'dataset': {'secret': 'REMOVED', 'zone': 'keep'}})
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import time
import tensorflow as tf
from object_detection import eval_util
from object_detection import inputs
from object_detection import model_lib
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import variables_helper
MODEL_BUILD_UTIL_MAP = model_lib.MODEL_BUILD_UTIL_MAP
### NOTE: This file is a wip.
### TODO(kaftan): Explore adding unit tests for individual methods
### TODO(kaftan): Add unit test that checks training on a single image w/
#### groundtruth, and verify that loss goes to zero.
#### Possibly have version that takes it as the whole train & eval dataset,
#### & verify the loss output from the eval_loop method.
### TODO(kaftan): Make sure the unit tests run in TAP presubmits or Kokoro
def _compute_losses_and_predictions_dicts(
model, features, labels,
add_regularization_loss=True):
"""Computes the losses dict and predictions dict for a model on inputs.
Args:
model: a DetectionModel (based on Keras).
features: Dictionary of feature tensors from the input dataset.
Should be in the format output by `inputs.train_input` and
`inputs.eval_input`.
features[fields.InputDataFields.image] is a [batch_size, H, W, C]
float32 tensor with preprocessed images.
features[HASH_KEY] is a [batch_size] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
features[fields.InputDataFields.original_image] (optional) is a
[batch_size, H, W, C] float32 tensor with original images.
labels: A dictionary of groundtruth tensors post-unstacking. The original
labels are of the form returned by `inputs.train_input` and
`inputs.eval_input`. The shapes may have been modified by unstacking with
`model_lib.unstack_batch`. However, the dictionary includes the following
fields.
labels[fields.InputDataFields.num_groundtruth_boxes] is a
int32 tensor indicating the number of valid groundtruth boxes
per image.
labels[fields.InputDataFields.groundtruth_boxes] is a float32 tensor
containing the corners of the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a float32
one-hot tensor of classes.
labels[fields.InputDataFields.groundtruth_weights] is a float32 tensor
containing groundtruth weights for the boxes.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
float32 tensor containing only binary values, which represent
instance masks for objects.
labels[fields.InputDataFields.groundtruth_keypoints] is a
float32 tensor containing keypoints for each box.
add_regularization_loss: Whether or not to include the model's
regularization loss in the losses dictionary.
Returns:
A tuple containing the losses dictionary (with the total loss under
the key 'Loss/total_loss'), and the predictions dictionary produced by
`model.predict`.
"""
model_lib.provide_groundtruth(model, labels)
preprocessed_images = features[fields.InputDataFields.image]
prediction_dict = model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape])
prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)
losses_dict = model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
if add_regularization_loss:
# TODO(kaftan): As we figure out mixed precision & bfloat 16, we may
## need to convert these regularization losses from bfloat16 to float32
## as well.
regularization_losses = model.regularization_losses()
if regularization_losses:
regularization_losses = ops.bfloat16_to_float32_nested(
regularization_losses)
regularization_loss = tf.add_n(
regularization_losses, name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
return losses_dict, prediction_dict
# TODO(kaftan): Explore removing learning_rate from this method & returning
## The full losses dict instead of just total_loss, then doing all summaries
## saving in a utility method called by the outer training loop.
# TODO(kaftan): Explore adding gradient summaries
def eager_train_step(detection_model,
features,
labels,
unpad_groundtruth_tensors,
optimizer,
learning_rate,
add_regularization_loss=True,
clip_gradients_value=None,
global_step=None,
num_replicas=1.0):
"""Process a single training batch.
This method computes the loss for the model on a single training batch,
while tracking the gradients with a gradient tape. It then updates the
model variables with the optimizer, clipping the gradients if
clip_gradients_value is present.
This method can run eagerly or inside a tf.function.
Args:
detection_model: A DetectionModel (based on Keras) to train.
features: Dictionary of feature tensors from the input dataset.
Should be in the format output by `inputs.train_input.
features[fields.InputDataFields.image] is a [batch_size, H, W, C]
float32 tensor with preprocessed images.
features[HASH_KEY] is a [batch_size] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
features[fields.InputDataFields.original_image] (optional, not used
during training) is a
[batch_size, H, W, C] float32 tensor with original images.
labels: A dictionary of groundtruth tensors. This method unstacks
these labels using model_lib.unstack_batch. The stacked labels are of
the form returned by `inputs.train_input` and `inputs.eval_input`.
labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size]
int32 tensor indicating the number of valid groundtruth boxes
per image.
labels[fields.InputDataFields.groundtruth_boxes] is a
[batch_size, num_boxes, 4] float32 tensor containing the corners of
the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a
[batch_size, num_boxes, num_classes] float32 one-hot tensor of
classes. num_classes includes the background class.
labels[fields.InputDataFields.groundtruth_weights] is a
[batch_size, num_boxes] float32 tensor containing groundtruth weights
for the boxes.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
[batch_size, num_boxes, H, W] float32 tensor containing only binary
values, which represent instance masks for objects.
labels[fields.InputDataFields.groundtruth_keypoints] is a
[batch_size, num_boxes, num_keypoints, 2] float32 tensor containing
keypoints for each box.
unpad_groundtruth_tensors: A parameter passed to unstack_batch.
optimizer: The training optimizer that will update the variables.
learning_rate: The learning rate tensor for the current training step.
This is used only for TensorBoard logging purposes, it does not affect
model training.
add_regularization_loss: Whether or not to include the model's
regularization loss in the losses dictionary.
clip_gradients_value: If this is present, clip the gradients global norm
at this value using `tf.clip_by_global_norm`.
global_step: The current training step. Used for TensorBoard logging
purposes. This step is not updated by this function and must be
incremented separately.
num_replicas: The number of replicas in the current distribution strategy.
This is used to scale the total loss so that training in a distribution
strategy works correctly.
Returns:
The total loss observed at this training step
"""
# """Execute a single training step in the TF v2 style loop."""
is_training = True
detection_model._is_training = is_training # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(is_training)
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
with tf.GradientTape() as tape:
losses_dict, _ = _compute_losses_and_predictions_dicts(
detection_model, features, labels, add_regularization_loss)
total_loss = losses_dict['Loss/total_loss']
# Normalize loss for num replicas
total_loss = tf.math.divide(total_loss,
tf.constant(num_replicas, dtype=tf.float32))
losses_dict['Loss/normalized_total_loss'] = total_loss
for loss_type in losses_dict:
tf.compat.v2.summary.scalar(
loss_type, losses_dict[loss_type], step=global_step)
trainable_variables = detection_model.trainable_variables
gradients = tape.gradient(total_loss, trainable_variables)
if clip_gradients_value:
gradients, _ = tf.clip_by_global_norm(gradients, clip_gradients_value)
optimizer.apply_gradients(zip(gradients, trainable_variables))
tf.compat.v2.summary.scalar('learning_rate', learning_rate, step=global_step)
return total_loss
def load_fine_tune_checkpoint(
model, checkpoint_path, checkpoint_type,
load_all_detection_checkpoint_vars, input_dataset,
unpad_groundtruth_tensors):
"""Load a fine tuning classification or detection checkpoint.
To make sure the model variables are all built, this method first executes
the model by computing a dummy loss. (Models might not have built their
variables before their first execution)
It then loads a variable-name based classification or detection checkpoint
that comes from converted TF 1.x slim model checkpoints.
This method updates the model in-place and does not return a value.
Args:
model: A DetectionModel (based on Keras) to load a fine-tuning
checkpoint for.
checkpoint_path: Directory with checkpoints file or path to checkpoint.
checkpoint_type: Whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`.
load_all_detection_checkpoint_vars: whether to load all variables (when
`fine_tune_checkpoint_type` is `detection`). If False, only variables
within the feature extractor scopes are included. Default False.
input_dataset: The tf.data Dataset the model is being trained on. Needed
to get the shapes for the dummy loss computation.
unpad_groundtruth_tensors: A parameter passed to unstack_batch.
"""
features, labels = iter(input_dataset).next()
def _dummy_computation_fn(features, labels):
model._is_training = False # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(False)
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
return _compute_losses_and_predictions_dicts(
model,
features,
labels)
strategy = tf.compat.v2.distribute.get_strategy()
strategy.experimental_run_v2(
_dummy_computation_fn, args=(
features,
labels,
))
var_map = model.restore_map(
fine_tune_checkpoint_type=checkpoint_type,
load_all_detection_checkpoint_vars=(
load_all_detection_checkpoint_vars))
available_var_map = variables_helper.get_variables_available_in_checkpoint(
var_map,
checkpoint_path,
include_global_step=False)
tf.train.init_from_checkpoint(checkpoint_path,
available_var_map)
def train_loop(
hparams,
pipeline_config_path,
model_dir,
config_override=None,
train_steps=None,
use_tpu=False,
save_final_config=False,
export_to_tpu=None,
checkpoint_every_n=1000, **kwargs):
"""Trains a model using eager + functions.
This method:
1. Processes the pipeline configs
2. (Optionally) saves the as-run config
3. Builds the model & optimizer
4. Gets the training input data
5. Loads a fine-tuning detection or classification checkpoint if requested
6. Loops over the train data, executing distributed training steps inside
tf.functions.
7. Checkpoints the model every `checkpoint_every_n` training steps.
8. Logs the training metrics as TensorBoard summaries.
Args:
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
model_dir:
The directory to save checkpoints and summaries to.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
use_tpu: Boolean, whether training and evaluation should run on TPU.
save_final_config: Whether to save final config (obtained after applying
overrides) to `model_dir`.
export_to_tpu: When use_tpu and export_to_tpu are true,
`export_savedmodel()` exports a metagraph for serving on TPU besides the
one on CPU. If export_to_tpu is not provided, we will look for it in
hparams too.
checkpoint_every_n:
Checkpoint every n training steps.
**kwargs: Additional keyword arguments for configuration override.
"""
## Parse the configs
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'train_steps': train_steps,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
configs = merge_external_params_with_configs(
configs, hparams, kwargs_dict=kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
unpad_groundtruth_tensors = train_config.unpad_groundtruth_tensors
add_regularization_loss = train_config.add_regularization_loss
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
# update train_steps from config but only when non-zero value is provided
if train_steps is None and train_config.num_steps != 0:
train_steps = train_config.num_steps
# Read export_to_tpu from hparams if not passed.
if export_to_tpu is None:
export_to_tpu = hparams.get('export_to_tpu', False)
tf.logging.info(
'train_loop: use_tpu %s, export_to_tpu %s', use_tpu,
export_to_tpu)
if kwargs['use_bfloat16']:
tf.compat.v2.keras.mixed_precision.experimental.set_policy('mixed_bfloat16')
# Parse the checkpoint fine tuning configs
if hparams.load_pretrained:
fine_tune_checkpoint_path = train_config.fine_tune_checkpoint
else:
fine_tune_checkpoint_path = None
load_all_detection_checkpoint_vars = (
train_config.load_all_detection_checkpoint_vars)
# TODO(kaftan) (or anyone else): move this piece of config munging to
## utils/config_util.py
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, set train_config.fine_tune_checkpoint_type
# based on train_config.from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
fine_tune_checkpoint_type = train_config.fine_tune_checkpoint_type
# Write the as-run pipeline config to disk.
if save_final_config:
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, model_dir)
# Build the model, optimizer, and training input
strategy = tf.compat.v2.distribute.get_strategy()
with strategy.scope():
detection_model = model_builder.build(
model_config=model_config, is_training=True)
# Create the inputs.
train_input = inputs.train_input(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config,
model=detection_model)
train_input = strategy.experimental_distribute_dataset(
train_input.repeat())
global_step = tf.compat.v2.Variable(
0, trainable=False, dtype=tf.compat.v2.dtypes.int64, name='global_step')
optimizer, (learning_rate,) = optimizer_builder.build(
train_config.optimizer, global_step=global_step)
if callable(learning_rate):
learning_rate_fn = learning_rate
else:
learning_rate_fn = lambda: learning_rate
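# Note: optimizer_builder may hand back either a learning-rate schedule (a
# callable) or a plain tensor. Wrapping the constant case in a lambda gives
# both forms the same call interface, so the step function below can always
# log `learning_rate_fn()` without caring which one was configured.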
## Train the model
summary_writer = tf.compat.v2.summary.create_file_writer(model_dir + '/train')
with summary_writer.as_default():
with strategy.scope():
# Load a fine-tuning checkpoint.
if fine_tune_checkpoint_path:
load_fine_tune_checkpoint(detection_model, fine_tune_checkpoint_path,
fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars,
train_input,
unpad_groundtruth_tensors)
ckpt = tf.compat.v2.train.Checkpoint(
step=global_step, model=detection_model, optimizer=optimizer)
manager = tf.compat.v2.train.CheckpointManager(
ckpt, model_dir, max_to_keep=7)
ckpt.restore(manager.latest_checkpoint)
def train_step_fn(features, labels):
return eager_train_step(
detection_model,
features,
labels,
unpad_groundtruth_tensors,
optimizer,
learning_rate=learning_rate_fn(),
add_regularization_loss=add_regularization_loss,
clip_gradients_value=clip_gradients_value,
global_step=global_step,
num_replicas=strategy.num_replicas_in_sync)
@tf.function
def _dist_train_step(data_iterator):
"""A distributed train step."""
features, labels = data_iterator.next()
per_replica_losses = strategy.experimental_run_v2(
train_step_fn, args=(
features,
labels,
))
# TODO(anjalisridhar): explore if it is safe to remove the
## num_replicas scaling of the loss and switch this to a ReduceOp.Mean
mean_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
train_input_iter = iter(train_input)
for _ in range(train_steps - global_step.value()):
start_time = time.time()
loss = _dist_train_step(train_input_iter)
global_step.assign_add(1)
end_time = time.time()
tf.compat.v2.summary.scalar(
'steps_per_sec', 1.0 / (end_time - start_time),
step=global_step)
if (int(global_step.value()) % 100) == 0:
tf.logging.info(
'Step {} time taken {:.3f}s loss={:.3f}'.format(
global_step.value(), end_time - start_time, loss))
if int(global_step.value()) % checkpoint_every_n == 0:
manager.save()
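# Illustrative usage sketch (not part of the original module; the paths and the
# `model_hparams.create_hparams` helper are assumptions for the example, not
# verified here):
#
#   from object_detection import model_hparams
#   train_loop(
#       hparams=model_hparams.create_hparams(None),
#       pipeline_config_path='/path/to/pipeline.config',
#       model_dir='/path/to/model_dir',
#       checkpoint_every_n=1000)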
def eager_eval_loop(
detection_model,
configs,
eval_dataset,
use_tpu=False,
postprocess_on_cpu=False,
global_step=None):
"""Evaluate the model eagerly on the evaluation dataset.
This method will compute the evaluation metrics specified in the configs on
the entire evaluation dataset, then return the metrics. It will also log
the metrics to TensorBoard.
Args:
detection_model: A DetectionModel (based on Keras) to evaluate.
configs: Object detection configs that specify the evaluators that should
be used, as well as whether regularization loss should be included and
if bfloat16 should be used on TPUs.
eval_dataset: Dataset containing evaluation data.
use_tpu: Whether a TPU is being used to execute the model for evaluation.
postprocess_on_cpu: Whether model postprocessing should happen on
the CPU when using a TPU to execute the model.
global_step: A variable containing the training step this model was trained
to. Used for logging purposes.
Returns:
A dict of evaluation metrics representing the results of this evaluation.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
add_regularization_loss = train_config.add_regularization_loss
is_training = False
detection_model._is_training = is_training # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(is_training)
evaluator_options = eval_util.evaluator_options_from_eval_config(
eval_config)
class_agnostic_category_index = (
label_map_util.create_class_agnostic_category_index())
class_agnostic_evaluators = eval_util.get_evaluators(
eval_config,
list(class_agnostic_category_index.values()),
evaluator_options)
class_aware_evaluators = None
if eval_input_config.label_map_path:
class_aware_category_index = (
label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path))
class_aware_evaluators = eval_util.get_evaluators(
eval_config,
list(class_aware_category_index.values()),
evaluator_options)
evaluators = None
loss_metrics = {}
@tf.function
def compute_eval_dict(features, labels):
"""Compute the evaluation result on an image."""
# When evaluating on train data, it is necessary to check whether groundtruth
# must be unpadded.
boxes_shape = (
labels[fields.InputDataFields.groundtruth_boxes].get_shape().as_list())
unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
losses_dict, prediction_dict = _compute_losses_and_predictions_dicts(
detection_model, features, labels, add_regularization_loss)
def postprocess_wrapper(args):
return detection_model.postprocess(args[0], args[1])
# TODO(kaftan): Depending on how postprocessing will work for TPUS w/
## TPUStrategy, may be good to move wrapping to a utility method
if use_tpu and postprocess_on_cpu:
detections = tf.contrib.tpu.outside_compilation(
postprocess_wrapper,
(prediction_dict, features[fields.InputDataFields.true_image_shape]))
else:
detections = postprocess_wrapper(
(prediction_dict, features[fields.InputDataFields.true_image_shape]))
class_agnostic = (
fields.DetectionResultFields.detection_classes not in detections)
# TODO(kaftan) (or anyone): move `_prepare_groundtruth_for_eval to eval_util
## and call this from there.
groundtruth = model_lib._prepare_groundtruth_for_eval( # pylint: disable=protected-access
detection_model, class_agnostic, eval_input_config.max_number_of_boxes)
use_original_images = fields.InputDataFields.original_image in features
if use_original_images:
eval_images = features[fields.InputDataFields.original_image]
true_image_shapes = tf.slice(
features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])
original_image_spatial_shapes = features[
fields.InputDataFields.original_image_spatial_shape]
else:
eval_images = features[fields.InputDataFields.image]
true_image_shapes = None
original_image_spatial_shapes = None
eval_dict = eval_util.result_dict_for_batched_example(
eval_images,
features[inputs.HASH_KEY],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=True,
original_image_spatial_shapes=original_image_spatial_shapes,
true_image_shapes=true_image_shapes)
return eval_dict, losses_dict, class_agnostic
for i, (features, labels) in enumerate(eval_dataset):
eval_dict, losses_dict, class_agnostic = compute_eval_dict(features, labels)
if i % 100 == 0:
tf.logging.info('Finished eval step %d', i)
if evaluators is None:
if class_agnostic:
evaluators = class_agnostic_evaluators
else:
evaluators = class_aware_evaluators
for evaluator in evaluators:
evaluator.add_eval_dict(eval_dict)
for loss_key, loss_tensor in iter(losses_dict.items()):
if loss_key not in loss_metrics:
loss_metrics[loss_key] = tf.keras.metrics.Mean()
loss_metrics[loss_key].update_state(loss_tensor)
eval_metrics = {}
for evaluator in evaluators:
eval_metrics.update(evaluator.evaluate())
for loss_key in loss_metrics:
eval_metrics[loss_key] = loss_metrics[loss_key].result()
eval_metrics = {str(k): v for k, v in eval_metrics.items()}
for k in eval_metrics:
tf.compat.v2.summary.scalar(k, eval_metrics[k], step=global_step)
return eval_metrics
def eval_continuously(
hparams,
pipeline_config_path,
config_override=None,
train_steps=None,
sample_1_of_n_eval_examples=1,
sample_1_of_n_eval_on_train_examples=1,
use_tpu=False,
override_eval_num_epochs=True,
postprocess_on_cpu=False,
export_to_tpu=None,
model_dir=None,
checkpoint_dir=None,
wait_interval=180,
**kwargs):
"""Run continuous evaluation of a detection model eagerly.
This method builds the model, and continuously restores it from the most
recent training checkpoint in the checkpoint directory & evaluates it
on the evaluation data.
Args:
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
sample_1_of_n_eval_examples: Integer representing how often an eval example
should be sampled. If 1, will sample all examples.
sample_1_of_n_eval_on_train_examples: Similar to
`sample_1_of_n_eval_examples`, except controls the sampling of training
data for evaluation.
use_tpu: Boolean, whether training and evaluation should run on TPU.
override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for
eval_input.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true,
postprocess is scheduled on the host cpu.
export_to_tpu: When use_tpu and export_to_tpu are true,
`export_savedmodel()` exports a metagraph for serving on TPU besides the
one on CPU. If export_to_tpu is not provided, we will look for it in
hparams too.
model_dir:
Directory to output resulting evaluation summaries to.
checkpoint_dir:
Directory that contains the training checkpoints.
wait_interval:
Terminate evaluation if no new checkpoints arrive within this wait
interval (in seconds).
**kwargs: Additional keyword arguments for configuration override.
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
if train_steps is not None:
kwargs['train_steps'] = train_steps
if override_eval_num_epochs:
kwargs.update({'eval_num_epochs': 1})
tf.logging.warning(
'Forced number of epochs for all eval validations to be 1.')
configs = merge_external_params_with_configs(
configs, hparams, kwargs_dict=kwargs)
model_config = configs['model']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_configs = configs['eval_input_configs']
eval_on_train_input_config = copy.deepcopy(train_input_config)
eval_on_train_input_config.sample_1_of_n_examples = (
sample_1_of_n_eval_on_train_examples)
if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:
tf.logging.warning('Expected number of evaluation epochs is 1, but '
'instead encountered `eval_on_train_input_config'
'.num_epochs` = '
'{}. Overwriting `num_epochs` to 1.'.format(
eval_on_train_input_config.num_epochs))
eval_on_train_input_config.num_epochs = 1
if kwargs['use_bfloat16']:
tf.compat.v2.keras.mixed_precision.experimental.set_policy('mixed_bfloat16')
detection_model = model_builder.build(
model_config=model_config, is_training=True)
# Create the inputs.
eval_inputs = []
for eval_input_config in eval_input_configs:
next_eval_input = inputs.eval_input(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config,
model=detection_model)
eval_inputs.append((eval_input_config.name, next_eval_input))
# Read export_to_tpu from hparams if not passed.
if export_to_tpu is None:
export_to_tpu = hparams.get('export_to_tpu', False)
tf.logging.info('eval_continuously: use_tpu %s, export_to_tpu %s',
use_tpu, export_to_tpu)
global_step = tf.compat.v2.Variable(
0, trainable=False, dtype=tf.compat.v2.dtypes.int64)
prev_checkpoint = None
waiting = False
while True:
ckpt = tf.compat.v2.train.Checkpoint(
step=global_step, model=detection_model)
manager = tf.compat.v2.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=3)
latest_checkpoint = manager.latest_checkpoint
if prev_checkpoint == latest_checkpoint:
if prev_checkpoint is None:
tf.logging.info('No checkpoints found yet. Trying again in %s seconds.'
% wait_interval)
time.sleep(wait_interval)
else:
if waiting:
tf.logging.info('Terminating eval after %s seconds of no new '
'checkpoints.' % wait_interval)
break
else:
tf.logging.info('No new checkpoint found. Will try again '
'in %s seconds and terminate if no checkpoint '
'appears.' % wait_interval)
waiting = True
time.sleep(wait_interval)
else:
tf.logging.info('New checkpoint found. Starting evaluation.')
waiting = False
prev_checkpoint = latest_checkpoint
ckpt.restore(latest_checkpoint)
for eval_name, eval_input in eval_inputs:
summary_writer = tf.compat.v2.summary.create_file_writer(
model_dir + '/eval' + eval_name)
with summary_writer.as_default():
eager_eval_loop(
detection_model,
configs,
eval_input,
use_tpu=use_tpu,
postprocess_on_cpu=postprocess_on_cpu,
global_step=global_step)
|
|
"""
Collect all the interesting data for analysis
"""
import os
import errno
import json
import archive
import logging
import copy
import glob
from subprocess import Popen, PIPE, STDOUT
from tempfile import NamedTemporaryFile
from ..contrib.soscleaner import SOSCleaner
from utilities import _expand_paths, generate_analysis_target_id
from constants import InsightsConstants as constants
from insights_spec import InsightsFile, InsightsCommand
from config import CONFIG as config
APP_NAME = constants.app_name
logger = logging.getLogger(__name__)
# python 2.7
SOSCLEANER_LOGGER = logging.getLogger('soscleaner')
SOSCLEANER_LOGGER.setLevel(logging.ERROR)
# python 2.6
SOSCLEANER_LOGGER = logging.getLogger('insights-client.soscleaner')
SOSCLEANER_LOGGER.setLevel(logging.ERROR)
class DataCollector(object):
'''
Run commands and collect files
'''
def __init__(self, archive_=None, config=None, mountpoint=None, target_name='', target_type='host'):
self.archive = archive_ if archive_ else archive.InsightsArchive()
self.mountpoint = '/'
if mountpoint:
self.mountpoint = mountpoint
self.target_name = target_name
self.target_type = target_type
self.config = config
def _get_meta_path(self, specname, conf):
# should really never need these
# since spec should always have an "archive_file_name"
# unless we are running old style spec
default_meta_spec = {'analysis_target': '/insights_data/analysis_target',
'branch_info': '/branch_info',
'machine-id': '/insights_data/machine-id',
'uploader_log': '/insights_data/insights_logs/insights.log'}
try:
archive_path = conf['meta_specs'][specname]['archive_file_name']
except LookupError:
logger.debug('%s spec not found. Using default.', specname)
archive_path = default_meta_spec[specname]
return archive_path
def _write_branch_info(self, conf, branch_info):
logger.debug("Writing branch information to archive...")
self.archive.add_metadata_to_archive(json.dumps(branch_info),
self._get_meta_path('branch_info', conf))
def _write_analysis_target_type(self, conf):
logger.debug('Writing target type to archive...')
self.archive.add_metadata_to_archive(self.target_type,
self._get_meta_path('analysis_target', conf))
def _write_analysis_target_id(self, conf):
# AKA machine-id
logger.debug('Writing machine-id to archive...')
if config['from_file'] is not None:
try:
with open(config['from_file']) as f:
stdin_config = json.load(f)
machine_id = stdin_config['machine-id']
except:
machine_id = generate_analysis_target_id(self.target_type, self.target_name)
else:
machine_id = generate_analysis_target_id(self.target_type, self.target_name)
self.archive.add_metadata_to_archive(machine_id,
self._get_meta_path('machine-id', conf))
def _write_uploader_log(self, conf):
logger.debug('Writing insights.log to archive...')
with open(config['logging_file']) as logfile:
self.archive.add_metadata_to_archive(logfile.read().strip().decode('utf-8'),
self._get_meta_path('uploader_log', conf))
def _run_pre_command(self, pre_cmd):
'''
Run a pre command to get external args for a command
'''
logger.debug('Executing pre-command: %s', pre_cmd)
try:
pre_proc = Popen(pre_cmd, stdout=PIPE, stderr=STDOUT, shell=True)
except OSError as err:
if err.errno == errno.ENOENT:
logger.debug('Command %s not found', pre_cmd)
return []
stdout, stderr = pre_proc.communicate()
the_return_code = pre_proc.poll()
logger.debug("Pre-command results:")
logger.debug("STDOUT: %s", stdout)
logger.debug("STDERR: %s", stderr)
logger.debug("Return Code: %s", the_return_code)
if the_return_code != 0:
return []
return stdout.splitlines()
def _parse_file_spec(self, spec):
'''
Separate wildcard specs into more specs
'''
# separate wildcard specs into more specs
if '*' in spec['file']:
expanded_paths = _expand_paths(spec['file'].replace(
'{CONTAINER_MOUNT_POINT}', self.mountpoint).replace(
'{DOCKER_IMAGE_NAME}', self.target_name).replace(
'{DOCKER_CONTAINER_NAME}', self.target_name))
if not expanded_paths:
return []
expanded_specs = []
for p in expanded_paths:
_spec = copy.copy(spec)
_spec['file'] = p
expanded_specs.append(_spec)
return expanded_specs
else:
return [spec]
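# Illustrative sketch (hypothetical spec, not taken from a real uploader.json):
# a wildcard spec such as {'file': '/etc/sysconfig/network-scripts/ifcfg-*'}
# is expanded into one spec per matching path, e.g.
#   [{'file': '.../ifcfg-eth0'}, {'file': '.../ifcfg-lo'}]
# with every other key of the original spec copied unchanged.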
def _parse_glob_spec(self, spec):
'''
Grab globs of things
'''
some_globs = glob.glob(spec['glob'])
if not some_globs:
return []
el_globs = []
for g in some_globs:
_spec = copy.copy(spec)
_spec['file'] = g
el_globs.append(_spec)
return el_globs
def _parse_command_spec(self, spec, precmds):
'''
Run pre_commands
'''
if 'pre_command' in spec:
precmd_alias = spec['pre_command']
try:
precmd = precmds[precmd_alias]
args = self._run_pre_command(precmd)
logger.debug('Pre-command results: %s', args)
expanded_specs = []
for arg in args:
_spec = copy.copy(spec)
_spec['command'] = _spec['command'] + ' ' + arg
expanded_specs.append(_spec)
return expanded_specs
except LookupError:
logger.debug('Pre-command %s not found. Skipping %s...',
precmd_alias, spec['command'])
return []
else:
return [spec]
def run_specific_specs(self, metadata_spec, conf, rm_conf, exclude, branch_info):
'''
Running metadata collection for specific environment
'''
logger.debug('Beginning to run collection spec for %s...', metadata_spec)
if metadata_spec in conf:
for spec in conf[metadata_spec]:
if 'file' in spec:
spec['archive_file_name'] = spec['file']
if rm_conf and 'files' in rm_conf and spec['file'] in rm_conf['files']:
logger.warn("WARNING: Skipping file %s", spec['file'])
continue
else:
file_specs = self._parse_file_spec(spec)
for s in file_specs:
file_spec = InsightsFile(s, exclude, self.mountpoint, self.target_name)
self.archive.add_to_archive(file_spec)
elif 'glob' in spec:
glob_specs = self._parse_glob_spec(spec)
for g in glob_specs:
if rm_conf and 'files' in rm_conf and g['file'] in rm_conf['files']:
logger.warn("WARNING: Skipping file %s", g)
continue
else:
glob_spec = InsightsFile(g, exclude, self.mountpoint, self.target_name)
self.archive.add_to_archive(glob_spec)
elif 'command' in spec:
if rm_conf and 'commands' in rm_conf and spec['command'] in rm_conf['commands']:
logger.warn("WARNING: Skipping command %s", spec['command'])
continue
else:
cmd_specs = self._parse_command_spec(spec, conf['pre_commands'])
for s in cmd_specs:
cmd_spec = InsightsCommand(s, exclude, self.mountpoint, self.target_name, self.config)
self.archive.add_to_archive(cmd_spec)
else:
logger.debug('Spec metadata type "%s" not found in spec.', metadata_spec)
logger.debug('Spec metadata collection finished.')
def run_collection(self, conf, rm_conf, branch_info):
'''
Run specs and collect all the data
'''
logger.debug('Beginning to run collection spec...')
exclude = None
if rm_conf:
try:
exclude = rm_conf['patterns']
except LookupError:
logger.debug('Could not parse remove.conf. Ignoring...')
if config['run_specific_specs'] is not None:
logger.debug('Running specific specs %s', config['run_specific_specs'])
for specific_spec in config['run_specific_specs'].split(','):
logger.debug('Running specific spec %s', specific_spec)
self.run_specific_specs(specific_spec, conf, rm_conf, exclude, branch_info)
logger.debug('Finished running specific spec %s', specific_spec)
return
for specname in conf['specs']:
try:
# spec group for a spec
spec_group = conf['specs'][specname]
# list of specs for a target
# there might be more than one spec (for compatibility)
spec_list = spec_group[self.target_type]
for spec in spec_list:
if 'file' in spec:
if rm_conf and 'files' in rm_conf and spec['file'] in rm_conf['files']:
logger.warn("WARNING: Skipping file %s", spec['file'])
continue
else:
file_specs = self._parse_file_spec(spec)
for s in file_specs:
file_spec = InsightsFile(s, exclude, self.mountpoint, self.target_name)
self.archive.add_to_archive(file_spec)
elif 'glob' in spec:
glob_specs = self._parse_glob_spec(spec)
for g in glob_specs:
if rm_conf and 'files' in rm_conf and g['file'] in rm_conf['files']:
logger.warn("WARNING: Skipping file %s", g)
continue
else:
glob_spec = InsightsFile(g, exclude, self.mountpoint, self.target_name)
self.archive.add_to_archive(glob_spec)
elif 'command' in spec:
if rm_conf and 'commands' in rm_conf and spec['command'] in rm_conf['commands']:
logger.warn("WARNING: Skipping command %s", spec['command'])
continue
else:
cmd_specs = self._parse_command_spec(spec, conf['pre_commands'])
for s in cmd_specs:
cmd_spec = InsightsCommand(s, exclude, self.mountpoint, self.target_name, self.config)
self.archive.add_to_archive(cmd_spec)
except LookupError:
logger.debug('Target type %s not found in spec %s. Skipping...', self.target_type, specname)
continue
logger.debug('Spec collection finished.')
# collect metadata
logger.debug('Collecting metadata...')
self._write_analysis_target_type(conf)
self._write_branch_info(conf, branch_info)
self._write_analysis_target_id(conf)
logger.debug('Metadata collection finished.')
def done(self, conf, rm_conf):
"""
Do finalization stuff
"""
self._write_uploader_log(conf)
if config["obfuscate"]:
cleaner = SOSCleaner(quiet=True)
clean_opts = CleanOptions(self.archive.tmp_dir, rm_conf)
fresh = cleaner.clean_report(clean_opts, self.archive.archive_dir)
if clean_opts.keyword_file is not None:
os.remove(clean_opts.keyword_file.name)
return fresh[0]
return self.archive.create_tar_file()
class CleanOptions(object):
"""
Options for soscleaner
"""
def __init__(self, tmp_dir, rm_conf):
self.report_dir = tmp_dir
self.domains = []
self.files = []
self.quiet = True
self.keyword_file = None
self.keywords = None
if rm_conf:
try:
keywords = rm_conf['keywords']
self.keyword_file = NamedTemporaryFile(delete=False)
self.keyword_file.write("\n".join(keywords))
self.keyword_file.flush()
self.keyword_file.close()
self.keywords = [self.keyword_file.name]
logger.debug("Attmpting keyword obfuscation")
except LookupError:
pass
if config["obfuscate_hostname"]:
self.hostname_path = "insights_commands/hostname"
else:
self.hostname_path = None
|
|
"""Provides device automations for MQTT."""
import logging
from typing import Callable, List, Optional
import attr
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.automation import AutomationActionType
import homeassistant.components.automation.mqtt as automation_mqtt
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH,
ATTR_DISCOVERY_TOPIC,
CONF_CONNECTIONS,
CONF_DEVICE,
CONF_IDENTIFIERS,
CONF_PAYLOAD,
CONF_QOS,
DOMAIN,
cleanup_device_registry,
debug_info,
)
from .discovery import MQTT_DISCOVERY_UPDATED, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
CONF_AUTOMATION_TYPE = "automation_type"
CONF_DISCOVERY_ID = "discovery_id"
CONF_SUBTYPE = "subtype"
CONF_TOPIC = "topic"
DEFAULT_ENCODING = "utf-8"
DEVICE = "device"
MQTT_TRIGGER_BASE = {
# Trigger when MQTT message is received
CONF_PLATFORM: DEVICE,
CONF_DOMAIN: DOMAIN,
}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): DEVICE,
vol.Required(CONF_DOMAIN): DOMAIN,
vol.Required(CONF_DEVICE_ID): str,
vol.Required(CONF_DISCOVERY_ID): str,
vol.Required(CONF_TYPE): cv.string,
vol.Required(CONF_SUBTYPE): cv.string,
}
)
TRIGGER_DISCOVERY_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_AUTOMATION_TYPE): str,
vol.Required(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Required(CONF_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_PAYLOAD, default=None): vol.Any(None, cv.string),
vol.Required(CONF_TYPE): cv.string,
vol.Required(CONF_SUBTYPE): cv.string,
},
mqtt.validate_device_has_at_least_one_identifier,
)
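# Illustrative sketch (all values are made up): a discovery payload that would
# validate against TRIGGER_DISCOVERY_SCHEMA above looks like
#   {
#       "automation_type": "trigger",
#       "device": {"identifiers": ["0AFFD2"]},
#       "topic": "foobar/triggers/button1",
#       "payload": "short_press",
#       "type": "button_short_press",
#       "subtype": "button_1",
#   }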
DEVICE_TRIGGERS = "mqtt_device_triggers"
@attr.s(slots=True)
class TriggerInstance:
"""Attached trigger settings."""
action: AutomationActionType = attr.ib()
automation_info: dict = attr.ib()
trigger: "Trigger" = attr.ib()
remove: Optional[CALLBACK_TYPE] = attr.ib(default=None)
async def async_attach_trigger(self):
"""Attach MQTT trigger."""
mqtt_config = {
automation_mqtt.CONF_TOPIC: self.trigger.topic,
automation_mqtt.CONF_ENCODING: DEFAULT_ENCODING,
automation_mqtt.CONF_QOS: self.trigger.qos,
}
if self.trigger.payload:
mqtt_config[CONF_PAYLOAD] = self.trigger.payload
if self.remove:
self.remove()
self.remove = await automation_mqtt.async_attach_trigger(
self.trigger.hass, mqtt_config, self.action, self.automation_info,
)
@attr.s(slots=True)
class Trigger:
"""Device trigger settings."""
device_id: str = attr.ib()
discovery_data: dict = attr.ib()
hass: HomeAssistantType = attr.ib()
payload: str = attr.ib()
qos: int = attr.ib()
remove_signal: Callable[[], None] = attr.ib()
subtype: str = attr.ib()
topic: str = attr.ib()
type: str = attr.ib()
trigger_instances: List[TriggerInstance] = attr.ib(factory=list)
async def add_trigger(self, action, automation_info):
"""Add MQTT trigger."""
instance = TriggerInstance(action, automation_info, self)
self.trigger_instances.append(instance)
if self.topic is not None:
# If we know about the trigger, subscribe to MQTT topic
await instance.async_attach_trigger()
@callback
def async_remove() -> None:
"""Remove trigger."""
if instance not in self.trigger_instances:
raise HomeAssistantError("Can't remove trigger twice")
if instance.remove:
instance.remove()
self.trigger_instances.remove(instance)
return async_remove
async def update_trigger(self, config, discovery_hash, remove_signal):
"""Update MQTT device trigger."""
self.remove_signal = remove_signal
self.type = config[CONF_TYPE]
self.subtype = config[CONF_SUBTYPE]
self.payload = config[CONF_PAYLOAD]
self.qos = config[CONF_QOS]
topic_changed = self.topic != config[CONF_TOPIC]
self.topic = config[CONF_TOPIC]
# Unsubscribe+subscribe only if this trigger is in use and the topic has changed.
# If the topic is unchanged, unsubscribe+subscribe would execute in the wrong order
# because unsubscribe is done with the help of async_create_task.
if topic_changed:
for trig in self.trigger_instances:
await trig.async_attach_trigger()
def detach_trigger(self):
"""Remove MQTT device trigger."""
# Mark trigger as unknown
self.topic = None
# Unsubscribe if this trigger is in use
for trig in self.trigger_instances:
if trig.remove:
trig.remove()
trig.remove = None
async def _update_device(hass, config_entry, config):
"""Update device registry."""
device_registry = await hass.helpers.device_registry.async_get_registry()
config_entry_id = config_entry.entry_id
device_info = mqtt.device_info_from_config(config[CONF_DEVICE])
if config_entry_id is not None and device_info is not None:
device_info["config_entry_id"] = config_entry_id
device_registry.async_get_or_create(**device_info)
async def async_setup_trigger(hass, config, config_entry, discovery_data):
"""Set up the MQTT device trigger."""
config = TRIGGER_DISCOVERY_SCHEMA(config)
discovery_hash = discovery_data[ATTR_DISCOVERY_HASH]
discovery_id = discovery_hash[1]
remove_signal = None
async def discovery_update(payload):
"""Handle discovery update."""
_LOGGER.info(
"Got update for trigger with hash: %s '%s'", discovery_hash, payload
)
if not payload:
# Empty payload: Remove trigger
_LOGGER.info("Removing trigger: %s", discovery_hash)
debug_info.remove_trigger_discovery_data(hass, discovery_hash)
if discovery_id in hass.data[DEVICE_TRIGGERS]:
device_trigger = hass.data[DEVICE_TRIGGERS][discovery_id]
device_trigger.detach_trigger()
clear_discovery_hash(hass, discovery_hash)
remove_signal()
await cleanup_device_registry(hass, device.id)
else:
# Non-empty payload: Update trigger
_LOGGER.info("Updating trigger: %s", discovery_hash)
debug_info.update_trigger_discovery_data(hass, discovery_hash, payload)
config = TRIGGER_DISCOVERY_SCHEMA(payload)
await _update_device(hass, config_entry, config)
device_trigger = hass.data[DEVICE_TRIGGERS][discovery_id]
await device_trigger.update_trigger(config, discovery_hash, remove_signal)
remove_signal = async_dispatcher_connect(
hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), discovery_update
)
await _update_device(hass, config_entry, config)
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get_device(
{(DOMAIN, id_) for id_ in config[CONF_DEVICE][CONF_IDENTIFIERS]},
{tuple(x) for x in config[CONF_DEVICE][CONF_CONNECTIONS]},
)
if device is None:
return
if DEVICE_TRIGGERS not in hass.data:
hass.data[DEVICE_TRIGGERS] = {}
if discovery_id not in hass.data[DEVICE_TRIGGERS]:
hass.data[DEVICE_TRIGGERS][discovery_id] = Trigger(
hass=hass,
device_id=device.id,
discovery_data=discovery_data,
type=config[CONF_TYPE],
subtype=config[CONF_SUBTYPE],
topic=config[CONF_TOPIC],
payload=config[CONF_PAYLOAD],
qos=config[CONF_QOS],
remove_signal=remove_signal,
)
else:
await hass.data[DEVICE_TRIGGERS][discovery_id].update_trigger(
config, discovery_hash, remove_signal
)
debug_info.add_trigger_discovery_data(
hass, discovery_hash, discovery_data, device.id
)
async def async_device_removed(hass: HomeAssistant, device_id: str):
"""Handle the removal of a device."""
triggers = await async_get_triggers(hass, device_id)
for trig in triggers:
device_trigger = hass.data[DEVICE_TRIGGERS].pop(trig[CONF_DISCOVERY_ID])
if device_trigger:
discovery_hash = device_trigger.discovery_data[ATTR_DISCOVERY_HASH]
discovery_topic = device_trigger.discovery_data[ATTR_DISCOVERY_TOPIC]
debug_info.remove_trigger_discovery_data(hass, discovery_hash)
device_trigger.detach_trigger()
clear_discovery_hash(hass, discovery_hash)
device_trigger.remove_signal()
mqtt.publish(
hass, discovery_topic, "", retain=True,
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for MQTT devices."""
triggers = []
if DEVICE_TRIGGERS not in hass.data:
return triggers
for discovery_id, trig in hass.data[DEVICE_TRIGGERS].items():
if trig.device_id != device_id or trig.topic is None:
continue
trigger = {
**MQTT_TRIGGER_BASE,
"device_id": device_id,
"type": trig.type,
"subtype": trig.subtype,
"discovery_id": discovery_id,
}
triggers.append(trigger)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
if DEVICE_TRIGGERS not in hass.data:
hass.data[DEVICE_TRIGGERS] = {}
config = TRIGGER_SCHEMA(config)
device_id = config[CONF_DEVICE_ID]
discovery_id = config[CONF_DISCOVERY_ID]
if discovery_id not in hass.data[DEVICE_TRIGGERS]:
hass.data[DEVICE_TRIGGERS][discovery_id] = Trigger(
hass=hass,
device_id=device_id,
discovery_data=None,
remove_signal=None,
type=config[CONF_TYPE],
subtype=config[CONF_SUBTYPE],
topic=None,
payload=None,
qos=None,
)
return await hass.data[DEVICE_TRIGGERS][discovery_id].add_trigger(
action, automation_info
)
|
|
"""
China(mainland)-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from .cn_provinces import CN_PROVINCE_CHOICES
__all__ = (
'CNProvinceSelect',
'CNPostCodeField',
'CNIDCardField',
'CNPhoneNumberField',
'CNCellNumberField',
)
ID_CARD_RE = r'^\d{15}(\d{2}[0-9xX])?$'
POST_CODE_RE = r'^\d{6}$'
PHONE_RE = r'^\d{3,4}-\d{7,8}(-\d+)?$'
CELL_RE = r'^1[34578]\d{9}$'
# Valid location code used in id card checking algorithm
CN_LOCATION_CODES = (
11, # Beijing
12, # Tianjin
13, # Hebei
14, # Shanxi
15, # Nei Mongol
21, # Liaoning
22, # Jilin
23, # Heilongjiang
31, # Shanghai
32, # Jiangsu
33, # Zhejiang
34, # Anhui
35, # Fujian
36, # Jiangxi
37, # Shandong
41, # Henan
42, # Hubei
43, # Hunan
44, # Guangdong
45, # Guangxi
46, # Hainan
50, # Chongqing
51, # Sichuan
52, # Guizhou
53, # Yunnan
54, # Xizang
61, # Shaanxi
62, # Gansu
63, # Qinghai
64, # Ningxia
65, # Xinjiang
71, # Taiwan
81, # Hong Kong
91, # Macao
)
class CNProvinceSelect(Select):
"""
A select widget providing the list of provinces and districts
in the People's Republic of China as choices.
"""
def __init__(self, attrs=None):
super(CNProvinceSelect, self).__init__(attrs, choices=CN_PROVINCE_CHOICES)
class CNPostCodeField(RegexField):
"""
A form field that validates input as postal codes in mainland China.
Valid codes are in the format of XXXXXX where X is a digit.
"""
default_error_messages = {
'invalid': _('Enter a post code in the format XXXXXX.'),
}
def __init__(self, *args, **kwargs):
super(CNPostCodeField, self).__init__(POST_CODE_RE, *args, **kwargs)
class CNIDCardField(CharField):
"""
A form field that validates input as a Resident Identity Card (PRC) number.
This field would check the following restrictions:
* the length could only be 15 or 18;
* if the length is 18, the last character can be x or X;
* has a valid checksum (only for those with a length of 18);
* has a valid date of birth;
* has a valid province.
The checksum algorithm is described in GB11643-1999.
See: http://en.wikipedia.org/wiki/Resident_Identity_Card#Identity_card_number
"""
default_error_messages = {
'invalid': _('ID Card Number consists of 15 or 18 digits.'),
'checksum': _('Invalid ID Card Number: Wrong checksum'),
'birthday': _('Invalid ID Card Number: Wrong birthdate'),
'location': _('Invalid ID Card Number: Wrong location code'),
}
def __init__(self, max_length=18, min_length=15, *args, **kwargs):
super(CNIDCardField, self).__init__(max_length, min_length, *args, **kwargs)
def clean(self, value):
"""
Check whether the input is a valid ID Card Number.
"""
# Check the length of the ID card number.
super(CNIDCardField, self).clean(value)
if not value:
return ""
# Check whether this ID card number has valid format
if not re.match(ID_CARD_RE, value):
raise ValidationError(self.error_messages['invalid'])
# Check the birthday of the ID card number.
if not self.has_valid_birthday(value):
raise ValidationError(self.error_messages['birthday'])
# Check the location of the ID card number.
if not self.has_valid_location(value):
raise ValidationError(self.error_messages['location'])
# Check the checksum of the ID card number.
value = value.upper()
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return '%s' % value
def has_valid_birthday(self, value):
"""
This method would grab the date of birth from the ID card number and
test whether it is a valid date.
"""
from datetime import datetime
if len(value) == 15:
# 1st generation ID card
time_string = value[6:12]
format_string = "%y%m%d"
else:
# 2nd generation ID card
time_string = value[6:14]
format_string = "%Y%m%d"
try:
datetime.strptime(time_string, format_string)
return True
except ValueError:
# invalid date
return False
def has_valid_location(self, value):
"""
This method checks if the first two digits in the ID Card are
valid province code.
"""
return int(value[:2]) in CN_LOCATION_CODES
def has_valid_checksum(self, value):
"""
This method checks if the last letter/digit is valid according to
GB11643-1999.
"""
# If the length of the number is not 18, then the number is a 1st
# generation ID card number, and there is no checksum to be checked.
if len(value) != 18:
return True
checksum_index = sum(
map(lambda a, b: a * (ord(b) - ord('0')),
(7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2),
value[:17],),
) % 11
return '10X98765432'[checksum_index] == value[-1]
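# Minimal sketch (not part of the original module): derive the expected check
# character for the first 17 digits of an 18-digit number, using the same
# GB11643-1999 weights and mapping as has_valid_checksum() above.
def _example_expected_check_char(first17):
    weights = (7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2)
    checksum_index = sum(w * int(d) for w, d in zip(weights, first17)) % 11
    return '10X98765432'[checksum_index]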
class CNPhoneNumberField(RegexField):
"""
A form field that validates input as a telephone number in mainland China.
A valid phone number could be like: 010-12345678.
Considering there might be extension numbers,
this could also be: 010-12345678-35.
"""
default_error_messages = {
'invalid': _('Enter a valid phone number.'),
}
def __init__(self, *args, **kwargs):
super(CNPhoneNumberField, self).__init__(PHONE_RE, *args, **kwargs)
class CNCellNumberField(RegexField):
"""
A form field that validates input as a cellphone number in mainland China.
A valid cellphone number could be like: 13012345678.
A very rough rule is used here: the first digit should be 1, the second
should be 3, 4, 5, 7 or 8, followed by 9 more digits.
The total length of a cellphone number should be 11.
.. versionchanged:: 1.1
Added 7 as a valid second digit for Chinese virtual mobile ISPs.
"""
default_error_messages = {
'invalid': _('Enter a valid cell number.'),
}
def __init__(self, *args, **kwargs):
super(CNCellNumberField, self).__init__(CELL_RE, *args, **kwargs)
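# Illustrative usage sketch (example values taken from the docstrings above;
# not part of the original module):
#
#   CNPostCodeField().clean('100000')              # six-digit post code
#   CNPhoneNumberField().clean('010-12345678-35')  # landline with extension
#   CNCellNumberField().clean('13012345678')       # 11-digit cell number
#
# Each call returns the cleaned value, or raises ValidationError with the
# field's 'invalid' message when the input does not match the pattern.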
|
|
''' Naive Bayes Clustering '''
import numpy as np
import random
from spnss.mathutil import argmax_fair, argmin_fair, logsumexp, log
import knobs
from fast import nbmix_likelihood_from_model as fast_nbmix_likelihood_from_model
from history import best_index
def nbmix_model(data, nvals, qa, smooth=0.1):
data = data.astype(np.int, copy=False)
nc = qa.max() + 1
n = data.shape[1]
m = data.shape[0]
# compute params for NB models
lprobs = float('-inf')*np.ones( (n, nc, max(nvals)) )
priors = np.zeros(nc)
for i in range(nc):
di = data[qa==i]
di_size = float(len(di))
priors[i] = log(di_size / m)
for j in range(n):
bc = np.bincount(di[:,j], minlength=nvals[j])
for k in range(nvals[j]):
c = bc[k]
if c == 0:
continue
lprobs[j,i,k] = log((c + smooth) / (di_size + smooth*nvals[j]))
return lprobs, priors
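# Note on the returned model: lprobs[j, i, k] holds the smoothed log-probability
# log P(variable j = value k | cluster i); entries stay at -inf for values that
# never occur in the cluster (smoothing is only applied to observed counts).
# priors[i] holds the log cluster prior log(|cluster i| / m).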
def nbmix_likelihood_from_model(data, lprobs, priors):
nc = lprobs[0].shape[0]
lp = np.zeros(nc)
ll = 0.0
for d in data:
lp[:] = 0.0
for j in range(data.shape[1]):
lp += lprobs[j,:,d[j]]
lp += priors
ll += logsumexp(lp)
return ll
def nbmix_likelihood(data, nvals, qa, smooth=0.1):
lprobs, priors = nbmix_model(data, nvals, qa, smooth)
return fast_nbmix_likelihood_from_model(np.asarray(data, order='c'), lprobs, priors)
# nbll = nbmix_likelihood_from_model(data, lprobs, priors)
# assert np.allclose( nbll, fast_nbmix_likelihood_from_model(data, lprobs, priors) )
# exit()
def max_likelihood_qa(data, nvals, qa, approximate=False, smooth=0.1):
nc = qa.max() + 1
sil = np.zeros(nc)
for i in range(nc):
di = data[qa==i].astype(np.int)
total = float(len(di))
si = 0.0
for j in range(di.shape[1]):
bc = np.bincount(di[:,j])
for c in bc:
if c == 0:
continue
si += c*log(c / total)
si += log(total / len(data)) # cluster prior
sil[i] = si
s = sil.sum() if approximate else logsumexp(sil)
return s
def cluster_and_score(data, nvals, alg, k):
qa = alg(data, k)
nc = qa.max()+1
if nc < 2:
return None, None
#s = max_likelihood_qa(data, nvals, qa, False)
s = nbmix_likelihood(data, nvals, qa, 0.1)
s -= knobs.cluster_penalty * nc * data.shape[1] # why data.shape[1]?
return qa, s
def cluster_with_automatic_k_selection_linear(data, nvals, alg):
best_score, best_qa, best_k = float('-inf'), None, 2
for k in xrange( 2, max(3, len(data)/2) ):
qa, s = cluster_and_score(data, nvals, alg, k)
if qa is None:
break
if s > best_score:
best_score = s
best_qa = qa
best_k = k
elif k > 6 and k > 1.5 * best_k:
break
if best_score == 0.0:
break
if best_qa is None:
data = np.array(data)
raise Exception, 'cluster_with_automatic_k_selection_linear(): was not able to cluster into two or more clusters'
return best_qa, best_qa.max()+1
def cluster_with_automatic_k_selection_skip(data, nvals, alg, skip):
assert skip > 1
def insert_score(k):
if sqah[k] is None:
sqah[k] = cluster_and_score(data, nvals, alg, k)[::-1]
sqah = [None] * (4 + 2*skip)
i, m, j = 2, 2+skip, 2+2*skip
insert_score(i)
insert_score(m)
insert_score(j)
si, sm, sj = [sqah[k][0] for k in [i,m,j]]
if sm <= si:
ib = best_index(i, m, lambda k: sqah[k][0] if sqah[k] is not None else None, insert_score, float('-inf'))
sb, qab = sqah[ib]
return qab, qab.max()+1
while j < len(data)/2:
if sj <= sm:
break
sqah.extend( [None]*skip )
i = m
m = j
j += skip
insert_score(j)
si, sm, sj = [sqah[k][0] for k in [i,m,j]]
ib = best_index(i, j, lambda k: sqah[k][0] if sqah[k] is not None else None, insert_score, float('-inf'))
sb, qab = sqah[ib]
return qab, qab.max()+1
def cluster_with_automatic_k_selection_binary(data, nvals, alg):
assert len(data) >= 8
best_score, best_qa, best_k = float('-inf'), None, 2
intervals = [(2, min(2000, len(data) / 2))] # assume no cluster should have fewer than two data instances; limit to 2000 clusters or fewer
sqah = [None for i in range(3 + len(data)/2)]
while len(intervals) > 0:
i, j = intervals.pop()
m = int((i + j) / 2)
for k in [i, m, j]:
if sqah[k] is None:
sqah[k] = cluster_and_score(data, nvals, alg, k)[::-1]
si, sm, sj = [sqah[k][0] for k in [i,m,j]]
ib = [i,m,j][np.argmax( [sqah[k][0] for k in [i,m,j]] )]
sb, qab = sqah[ib]
if sb > best_score:
best_score = sb
best_qa = qab
best_k = ib
elif sb == best_score and ib < best_k:
best_qa = qab
best_k = ib
if sm > max(si, sj):
if i + 1 < m: intervals.append( (i, m) )
if m + 1 < j: intervals.append( (m, j) )
elif sm == si and sm >= sj:
continue
elif sm == sj and sm > si:
if i + 1 < m: intervals.append( (i, m) )
elif best_score > max(si, sj):
continue
elif si >= sj:
if i + 1 < m: intervals.append( (i, m) )
else:
if m + 1 < j: intervals.append( (m, j) )
if best_qa is None:
raise Exception, 'cluster_with_automatic_k_selection_binary(): was not able to cluster into two or more clusters'
return best_qa, best_qa.max()+1
def cluster_with_automatic_k_selection(data, nvals, alg):
#return cluster_with_automatic_k_selection_linear(data, nvals, alg)
if len(data) <= 30:
return cluster_with_automatic_k_selection_linear(data, nvals, alg)
else:
#return cluster_with_automatic_k_selection_binary(data, nvals, alg)
return cluster_with_automatic_k_selection_skip(data, nvals, alg, 20)
def kcluster(data, nvals):
data = data.astype(np.float)
from vlfwrap import KMeans
km = KMeans()
km.repetitions = 1
def kmeans(data, k):
km.cluster(data, k)
return km.quantize(data)
return cluster_with_automatic_k_selection(data, nvals, kmeans)
def kcluster_fixed(data, k):
data = data.astype(np.float)
from vlfwrap import KMeans
km = KMeans()
km.repetitions = 1
km.cluster(data, k)
qa = km.quantize(data)
nc = qa.max() + 1
# if nc != k and len(data) >= k:
# qa = np.arange(len(data))
# qa %= k
# nc = k
return qa, nc
def hamming(x1, x2):
assert len(x1) == len(x2)
return (x1 != x2).sum()
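# Illustrative example (not part of the original module): for two rows
#   x1 = np.array([0, 1, 1, 0]) and x2 = np.array([1, 1, 0, 0])
# hamming(x1, x2) == 2, since the rows differ in exactly two positions.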
def ham_cluster(data, k):
m, n = data.shape
assert 1 <= k <= m
pts = range(m)
p = random.choice(pts)
pts.remove(p)
centers = [p]
distances = [[hamming(data[i], data[p])] for i in range(m)]
for i in range(k-1):
pi = argmax_fair([min(distances[q]) for q in pts])
p = pts[pi]
for q in pts:
distances[q].append( hamming(data[q], data[p]) )
centers.append(p)
pts.remove(p)
a = [argmin_fair(distances[q]) for q in range(m)] # assignments
return np.array(a)
def hcluster(data, nvals):
data = data.astype(np.float)
return cluster_with_automatic_k_selection(data, nvals, ham_cluster)
from fast import fast_ll
class Cluster():
def __init__(self, nvals):
self.nvals = nvals
self.smoo = 0.1
self.sstats = {}
self.size = 0
def add_inst(self, inst):
for i in xrange(len(self.nvals)):
v = inst[i]
if i not in self.sstats:
self.sstats[i] = [0]*self.nvals[i]
self.sstats[i][v] = self.sstats[i][v] + 1
self.size += 1
def remove_inst(self, inst):
self.size -= 1
for i in xrange(len(self.nvals)):
v = inst[i]
self.sstats[i][v] = self.sstats[i][v] - 1
def ll(self, inst, best_cll=float('-inf')):
sstats = self.sstats
l = 0.0
for i in xrange(len(self.nvals)):
v = inst[i]
w = sstats[i][v]
l += log((w + self.smoo) / (self.size + self.nvals[i]*self.smoo))
if l < best_cll:
return l
return l
def fast_ll(self, inst, best_cll):
return fast_ll(inst, self.sstats, self.nvals, self.size, self.smoo, best_cll)
def is_empty(self):
return self.size == 0
def penalized_ll(nbcs, data):
ll = 0.0
cluster_priors = [log(1.0 * c.size / len(data)) for c in nbcs]
for inst in data:
vals = [cluster_priors[i] + nbcs[i].fast_ll(inst, float('-inf')) for i in range(len(nbcs))]
ll += logsumexp( vals )
ll -= knobs.cluster_penalty * len(nbcs) * data.shape[1]
return ll
def new_ll(nvals, smooth=0.1):
return sum( log((1. + smooth) / (1. + v * smooth)) for v in nvals )
def inc_hard_em(data, nvals):
num_runs = 10
num_em_its = 4
best_ll = float('-inf')
best_qa = np.zeros(len(data), dtype=np.int)
instance_order = range(len(data))
new_cluster_ll = new_ll(nvals, 0.1)
new_cluster_penalized_ll = -knobs.cluster_penalty * 1 * len(nvals) + new_cluster_ll
for r in range(num_runs):
#print 'EM Run', r, 'with', len(instance_order), 'insts', len(nvals), 'vars'
nbcs = []
random.shuffle(instance_order)
inst_cluster_map = {}
it = 0
while it < num_em_its:
it += 1
ll = 0
for i in instance_order:
inst = data[i]
prev_cluster = inst_cluster_map.pop(i, None)
if prev_cluster is not None:
prev_cluster.remove_inst(inst)
if prev_cluster.is_empty():
nbcs.remove( prev_cluster )
best_cll = new_cluster_penalized_ll
best_cluster = None
for c in nbcs:
cll = c.fast_ll(inst, best_cll)
if cll > best_cll:
best_cll = cll
best_cluster = c
# make new cluster if best_cll is not greater than penalty
if best_cluster is None:
best_cluster = Cluster(nvals)
nbcs.append(best_cluster)
if len(nbcs) > 2000 and len(data) > 10000:
print 'Too many clusters, increase CP penalty'
exit()
best_cluster.add_inst(inst)
inst_cluster_map[i] = best_cluster
ll += best_cll
if len(nbcs) == 1:
it = 0
new_cluster_penalized_ll *= 0.5
# end em
ll = penalized_ll(nbcs, data)
if ll > best_ll:
best_ll = ll
c_indices = {c:i for i,c in enumerate(nbcs)}
best_qa[:] = [c_indices[inst_cluster_map[i]] for i in range(len(data))]
return best_qa, best_qa.max()+1
|
|
## Amazon S3 manager
## Author: Michal Ludvig <[email protected]>
## http://www.logix.cz/michal
## License: GPL Version 2
import datetime
import os
import sys
import time
import re
import string
import random
import rfc822
import hmac
import base64
import errno
import urllib
from logging import debug, info, warning, error
import Config
import Exceptions
# hashlib backported to python 2.4 / 2.5 is not compatible with hmac!
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
from md5 import md5
import sha as sha1
else:
from hashlib import md5, sha1
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
from xml.parsers.expat import ExpatError
__all__ = []
def parseNodes(nodes):
## WARNING: Ignores text nodes from mixed xml/text.
## For instance <tag1>some text<tag2>other text</tag2></tag1>
## will ignore the "some text" node
retval = []
for node in nodes:
retval_item = {}
for child in node.getchildren():
name = child.tag
if child.getchildren():
retval_item[name] = parseNodes([child])
else:
retval_item[name] = node.findtext(".//%s" % child.tag)
retval.append(retval_item)
return retval
__all__.append("parseNodes")
def stripNameSpace(xml):
"""
stripNameSpace(xml) -- remove top-level AWS namespace
"""
r = re.compile('^(<?[^>]+?>\s?)(<\w+) xmlns=[\'"](http://[^\'"]+)[\'"](.*)', re.MULTILINE)
if r.match(xml):
xmlns = r.match(xml).groups()[2]
xml = r.sub("\\1\\2\\4", xml)
else:
xmlns = None
return xml, xmlns
__all__.append("stripNameSpace")
def getTreeFromXml(xml):
xml, xmlns = stripNameSpace(xml)
try:
tree = ET.fromstring(xml)
if xmlns:
tree.attrib['xmlns'] = xmlns
return tree
except ExpatError, e:
error(e)
raise Exceptions.ParameterError("Bucket contains invalid filenames. Please run: s3cmd fixbucket s3://your-bucket/")
__all__.append("getTreeFromXml")
def getListFromXml(xml, node):
tree = getTreeFromXml(xml)
nodes = tree.findall('.//%s' % (node))
return parseNodes(nodes)
__all__.append("getListFromXml")
def getDictFromTree(tree):
ret_dict = {}
for child in tree.getchildren():
if child.getchildren():
## Complex-type child. Recurse
content = getDictFromTree(child)
else:
content = child.text
if ret_dict.has_key(child.tag):
if not type(ret_dict[child.tag]) == list:
ret_dict[child.tag] = [ret_dict[child.tag]]
ret_dict[child.tag].append(content or "")
else:
ret_dict[child.tag] = content or ""
return ret_dict
__all__.append("getDictFromTree")
def getTextFromXml(xml, xpath):
tree = getTreeFromXml(xml)
if tree.tag.endswith(xpath):
return tree.text
else:
return tree.findtext(xpath)
__all__.append("getTextFromXml")
def getRootTagName(xml):
tree = getTreeFromXml(xml)
return tree.tag
__all__.append("getRootTagName")
def xmlTextNode(tag_name, text):
el = ET.Element(tag_name)
el.text = unicode(text)
return el
__all__.append("xmlTextNode")
def appendXmlTextNode(tag_name, text, parent):
"""
Creates a new <tag_name> Node and sets
its content to 'text'. Then appends the
created Node to 'parent' element if given.
Returns the newly created Node.
"""
el = xmlTextNode(tag_name, text)
parent.append(el)
return el
__all__.append("appendXmlTextNode")
def dateS3toPython(date):
date = re.compile("(\.\d*)?Z").sub(".000Z", date)
return time.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z")
__all__.append("dateS3toPython")
def dateS3toUnix(date):
## FIXME: This should be timezone-aware.
## Currently the argument to strptime() is GMT but mktime()
## treats it as "localtime". Anyway...
return time.mktime(dateS3toPython(date))
__all__.append("dateS3toUnix")
def dateRFC822toPython(date):
return rfc822.parsedate(date)
__all__.append("dateRFC822toPython")
def dateRFC822toUnix(date):
return time.mktime(dateRFC822toPython(date))
__all__.append("dateRFC822toUnix")
def formatSize(size, human_readable = False, floating_point = False):
size = floating_point and float(size) or int(size)
if human_readable:
coeffs = ['k', 'M', 'G', 'T']
coeff = ""
while size > 2048:
size /= 1024
coeff = coeffs.pop(0)
return (size, coeff)
else:
return (size, "")
__all__.append("formatSize")
def formatDateTime(s3timestamp):
try:
import pytz
timezone = pytz.timezone(os.environ.get('TZ', 'UTC'))
tz = pytz.timezone('UTC')
## Can't unpack args and follow that with kwargs in python 2.5
## So we pass them all as kwargs
params = zip(('year', 'month', 'day', 'hour', 'minute', 'second', 'tzinfo'),
dateS3toPython(s3timestamp)[0:6] + (tz,))
params = dict(params)
utc_dt = datetime.datetime(**params)
dt_object = utc_dt.astimezone(timezone)
except ImportError:
dt_object = datetime.datetime(*dateS3toPython(s3timestamp)[0:6])
return dt_object.strftime("%Y-%m-%d %H:%M")
__all__.append("formatDateTime")
def convertTupleListToDict(list):
retval = {}
for tuple in list:
retval[tuple[0]] = tuple[1]
return retval
__all__.append("convertTupleListToDict")
_rnd_chars = string.ascii_letters+string.digits
_rnd_chars_len = len(_rnd_chars)
def rndstr(len):
retval = ""
while len > 0:
retval += _rnd_chars[random.randint(0, _rnd_chars_len-1)]
len -= 1
return retval
__all__.append("rndstr")
def mktmpsomething(prefix, randchars, createfunc):
old_umask = os.umask(0077)
tries = 5
while tries > 0:
dirname = prefix + rndstr(randchars)
try:
createfunc(dirname)
break
except OSError, e:
if e.errno != errno.EEXIST:
os.umask(old_umask)
raise
tries -= 1
os.umask(old_umask)
return dirname
__all__.append("mktmpsomething")
def mktmpdir(prefix = os.getenv('TMP','/tmp') + "/tmpdir-", randchars = 10):
return mktmpsomething(prefix, randchars, os.mkdir)
__all__.append("mktmpdir")
def mktmpfile(prefix = os.getenv('TMP','/tmp') + "/tmpfile-", randchars = 20):
createfunc = lambda filename : os.close(os.open(filename, os.O_CREAT | os.O_EXCL))
return mktmpsomething(prefix, randchars, createfunc)
__all__.append("mktmpfile")
def hash_file_md5(filename):
h = md5()
f = open(filename, "rb")
while True:
# Hash 32kB chunks
data = f.read(32*1024)
if not data:
break
h.update(data)
f.close()
return h.hexdigest()
__all__.append("hash_file_md5")
def mkdir_with_parents(dir_name):
"""
    mkdir_with_parents(dir_name)
Create directory 'dir_name' with all parent directories
Returns True on success, False otherwise.
"""
pathmembers = dir_name.split(os.sep)
tmp_stack = []
while pathmembers and not os.path.isdir(os.sep.join(pathmembers)):
tmp_stack.append(pathmembers.pop())
while tmp_stack:
pathmembers.append(tmp_stack.pop())
cur_dir = os.sep.join(pathmembers)
try:
debug("mkdir(%s)" % cur_dir)
os.mkdir(cur_dir)
except (OSError, IOError), e:
warning("%s: can not make directory: %s" % (cur_dir, e.strerror))
return False
except Exception, e:
warning("%s: %s" % (cur_dir, e))
return False
return True
__all__.append("mkdir_with_parents")
def unicodise(string, encoding = None, errors = "replace"):
"""
Convert 'string' to Unicode or raise an exception.
"""
if not encoding:
encoding = Config.Config().encoding
if type(string) == unicode:
return string
debug("Unicodising %r using %s" % (string, encoding))
try:
return string.decode(encoding, errors)
except UnicodeDecodeError:
raise UnicodeDecodeError("Conversion to unicode failed: %r" % string)
__all__.append("unicodise")
def deunicodise(string, encoding = None, errors = "replace"):
"""
Convert unicode 'string' to <type str>, by default replacing
all invalid characters with '?' or raise an exception.
"""
if not encoding:
encoding = Config.Config().encoding
if type(string) != unicode:
return str(string)
debug("DeUnicodising %r using %s" % (string, encoding))
try:
return string.encode(encoding, errors)
except UnicodeEncodeError:
raise UnicodeEncodeError("Conversion from unicode failed: %r" % string)
__all__.append("deunicodise")
def unicodise_safe(string, encoding = None):
"""
Convert 'string' to Unicode according to current encoding
and replace all invalid characters with '?'
"""
return unicodise(deunicodise(string, encoding), encoding).replace(u'\ufffd', '?')
__all__.append("unicodise_safe")
def replace_nonprintables(string):
"""
replace_nonprintables(string)
Replaces all non-printable characters 'ch' in 'string'
where ord(ch) <= 26 with ^@, ^A, ... ^Z
"""
new_string = ""
modified = 0
for c in string:
o = ord(c)
if (o <= 31):
new_string += "^" + chr(ord('@') + o)
modified += 1
elif (o == 127):
new_string += "^?"
modified += 1
else:
new_string += c
if modified and Config.Config().urlencoding_mode != "fixbucket":
warning("%d non-printable characters replaced in: %s" % (modified, new_string))
return new_string
__all__.append("replace_nonprintables")
def sign_string(string_to_sign):
"""Sign a string with the secret key, returning base64 encoded results.
By default the configured secret key is used, but may be overridden as
an argument.
Useful for REST authentication. See http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
"""
signature = base64.encodestring(hmac.new(Config.Config().secret_key, string_to_sign, sha1).digest()).strip()
return signature
__all__.append("sign_string")
def sign_url(url_to_sign, expiry):
"""Sign a URL in s3://bucket/object form with the given expiry
time. The object will be accessible via the signed URL until the
AWS key and secret are revoked or the expiry time is reached, even
if the object is otherwise private.
See: http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
"""
return sign_url_base(
bucket = url_to_sign.bucket(),
object = url_to_sign.object(),
expiry = expiry
)
__all__.append("sign_url")
def sign_url_base(**parms):
"""Shared implementation of sign_url methods. Takes a hash of 'bucket', 'object' and 'expiry' as args."""
parms['expiry']=time_to_epoch(parms['expiry'])
parms['access_key']=Config.Config().access_key
debug("Expiry interpreted as epoch time %s", parms['expiry'])
signtext = 'GET\n\n\n%(expiry)d\n/%(bucket)s/%(object)s' % parms
debug("Signing plaintext: %r", signtext)
parms['sig'] = urllib.quote_plus(sign_string(signtext))
debug("Urlencoded signature: %s", parms['sig'])
return "http://%(bucket)s.s3.amazonaws.com/%(object)s?AWSAccessKeyId=%(access_key)s&Expires=%(expiry)d&Signature=%(sig)s" % parms
def time_to_epoch(t):
"""Convert time specified in a variety of forms into UNIX epoch time.
Accepts datetime.datetime, int, anything that has a strftime() method, and standard time 9-tuples
"""
if isinstance(t, int):
# Already an int
return t
elif isinstance(t, tuple) or isinstance(t, time.struct_time):
# Assume it's a time 9-tuple
return int(time.mktime(t))
elif hasattr(t, 'timetuple'):
# Looks like a datetime object or compatible
return int(time.mktime(t.timetuple()))
elif hasattr(t, 'strftime'):
        # Looks like the object supports the standard strftime()
return int(t.strftime('%s'))
elif isinstance(t, str) or isinstance(t, unicode):
# See if it's a string representation of an epoch
try:
return int(t)
except ValueError:
# Try to parse it as a timestamp string
try:
                return int(time.mktime(time.strptime(t)))
except ValueError, ex:
# Will fall through
debug("Failed to parse date with strptime: %s", ex)
pass
raise Exceptions.ParameterError('Unable to convert %r to an epoch time. Pass an epoch time. Try `date -d \'now + 1 year\' +%%s` (shell) or time.mktime (Python).' % t)
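# Illustrative examples: time_to_epoch(1234567890) returns 1234567890 unchanged,
# time_to_epoch(datetime.datetime(2009, 2, 14, 0, 31, 30)) converts via
# time.mktime() using the local timezone, and the string "1234567890" is first
# tried as an epoch number.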
def check_bucket_name(bucket, dns_strict = True):
if dns_strict:
invalid = re.search("([^a-z0-9\.-])", bucket)
if invalid:
raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: lowercase us-ascii letters (a-z), digits (0-9), dot (.) and hyphen (-)." % (bucket, invalid.groups()[0]))
else:
invalid = re.search("([^A-Za-z0-9\._-])", bucket)
if invalid:
raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: us-ascii letters (a-z, A-Z), digits (0-9), dot (.), hyphen (-) and underscore (_)." % (bucket, invalid.groups()[0]))
if len(bucket) < 3:
raise Exceptions.ParameterError("Bucket name '%s' is too short (min 3 characters)" % bucket)
if len(bucket) > 255:
raise Exceptions.ParameterError("Bucket name '%s' is too long (max 255 characters)" % bucket)
if dns_strict:
if len(bucket) > 63:
raise Exceptions.ParameterError("Bucket name '%s' is too long (max 63 characters)" % bucket)
if re.search("-\.", bucket):
raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '-.' for DNS compatibility" % bucket)
if re.search("\.\.", bucket):
raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '..' for DNS compatibility" % bucket)
if not re.search("^[0-9a-z]", bucket):
raise Exceptions.ParameterError("Bucket name '%s' must start with a letter or a digit" % bucket)
if not re.search("[0-9a-z]$", bucket):
raise Exceptions.ParameterError("Bucket name '%s' must end with a letter or a digit" % bucket)
return True
__all__.append("check_bucket_name")
def check_bucket_name_dns_conformity(bucket):
try:
return check_bucket_name(bucket, dns_strict = True)
except Exceptions.ParameterError:
return False
__all__.append("check_bucket_name_dns_conformity")
def getBucketFromHostname(hostname):
"""
bucket, success = getBucketFromHostname(hostname)
Only works for hostnames derived from bucket names
using Config.host_bucket pattern.
Returns bucket name and a boolean success flag.
"""
# Create RE pattern from Config.host_bucket
pattern = Config.Config().host_bucket % { 'bucket' : '(?P<bucket>.*)' }
m = re.match(pattern, hostname)
if not m:
return (hostname, False)
return m.groups()[0], True
__all__.append("getBucketFromHostname")
def getHostnameFromBucket(bucket):
return Config.Config().host_bucket % { 'bucket' : bucket }
__all__.append("getHostnameFromBucket")
def calculateChecksum(buffer, mfile, offset, chunk_size, send_chunk):
md5_hash = md5()
size_left = chunk_size
if buffer == '':
mfile.seek(offset)
while size_left > 0:
data = mfile.read(min(send_chunk, size_left))
md5_hash.update(data)
size_left -= len(data)
else:
md5_hash.update(buffer)
return md5_hash.hexdigest()
__all__.append("calculateChecksum")
# vim:et:ts=4:sts=4:ai
|
|
"""
Act as a wrapper around pandas_datareader and write the responses to the
database to be accessed later.
"""
import datetime as dt
import logging
from typing import Dict, Iterable, Union, Tuple
import numpy as np
import pandas as pd
import pandas_datareader as pdr
from arctic.date import DateRange
from arctic.exceptions import NoDataFoundException
from pandas.tseries.offsets import BDay
from pandas_datareader._utils import RemoteDataError
import pytech.utils.dt_utils as dt_utils
import pytech.utils.pandas_utils as pd_utils
from pytech.decorators.decorators import write_chunks
from pytech.mongo import ARCTIC_STORE
from pytech.mongo.barstore import BarStore
from pytech.utils.exceptions import DataAccessError
from pytech.data._holders import DfLibName
logger = logging.getLogger(__name__)
ticker_input = Union[Iterable, str, pd.DataFrame]
range_type = Union[pd.DatetimeIndex, DateRange]
YAHOO = 'yahoo'
GOOGLE = 'google'
FRED = 'fred'
FAMA_FRENCH = 'famafrench'
class BarReader(object):
"""Read and write data from the DB and the web."""
def __init__(self, lib_name: str):
self.lib_name = lib_name
if lib_name not in ARCTIC_STORE.list_libraries():
# create the lib if it does not already exist
ARCTIC_STORE.initialize_library(lib_name,
BarStore.LIBRARY_TYPE)
self.lib = ARCTIC_STORE[self.lib_name]
def get_data(self,
tickers: ticker_input,
source: str = GOOGLE,
start: dt.datetime = None,
end: dt.datetime = None,
check_db: bool = True,
filter_data: bool = True,
**kwargs) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""
Get data and create a :class:`pd.DataFrame` from it.
:param tickers: The ticker(s) that data will be retrieved for.
:param source: The data source. Options:
* yahoo
* google
* fred
* famafrench
* db
* anything else pandas_datareader supports
        :param start: Left boundary for range; defaults to 1/1/2010.
        :param end: Right boundary for range; defaults to today.
:param check_db: Check the database first before making network call.
        :param filter_data: Filter data from the DB. Only used if `check_db`
            is `True`.
:param kwargs: kwargs are passed blindly to `pandas_datareader`
        :return: A :class:`pd.DataFrame` for a single ticker, otherwise a `dict[ticker, DataFrame]`.
"""
start, end = dt_utils.sanitize_dates(start, end)
if isinstance(tickers, str):
try:
df_lib_name = self._single_get_data(tickers, source, start,
end, check_db, filter_data,
**kwargs)
return df_lib_name.df
except DataAccessError as e:
raise DataAccessError(
f'Could not get data for ticker: {tickers}') from e
else:
if isinstance(tickers, pd.DataFrame):
tickers = tickers.index
try:
return self._mult_tickers_get_data(tickers, source, start, end,
check_db, filter_data,
**kwargs)
except DataAccessError as e:
raise e
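    # Usage sketch (the library name 'pytech.bars' is hypothetical; any Arctic
    # library name works):
    #   reader = BarReader('pytech.bars')
    #   df = reader.get_data('AAPL', source=YAHOO,
    #                        start=dt.datetime(2016, 1, 1),
    #                        end=dt.datetime(2017, 1, 1))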
def _mult_tickers_get_data(self,
tickers: Iterable,
source: str,
start: dt.datetime,
end: dt.datetime,
check_db: bool,
filter_data: bool,
**kwargs) -> Dict[str, pd.DataFrame]:
"""Download data for multiple tickers."""
stocks = {}
failed = []
passed = []
for t in tickers:
try:
df_lib_name = self._single_get_data(t, source, start, end,
check_db, filter_data,
**kwargs)
stocks[t] = df_lib_name.df
passed.append(t)
except DataAccessError:
failed.append(t)
if len(passed) == 0:
raise DataAccessError('No data could be retrieved.')
if len(stocks) > 0 and len(failed) > 0 and len(passed) > 0:
df_na = stocks[passed[0]].copy()
df_na[:] = np.nan
for t in failed:
logger.warning(f'No data could be retrieved for ticker: {t}, '
f'replacing with NaN.')
stocks[t] = df_na
return stocks
def _single_get_data(self,
ticker: str,
source: str,
start: dt.datetime,
end: dt.datetime,
check_db: bool,
filter_data: bool,
**kwargs):
"""Do the get data method for a single ticker."""
if check_db:
try:
return self._from_db(ticker, source, start, end,
filter_data, **kwargs)
except DataAccessError:
# don't raise, try to make the network call
logger.info(f'Ticker: {ticker} not found in DB.')
try:
return self._from_web(ticker, source, start, end, **kwargs)
except DataAccessError:
logger.warning(f'Error getting data from {source} '
f'for ticker: {ticker}')
raise
@write_chunks()
def _from_web(self,
ticker: str,
source: str,
start: dt.datetime,
end: dt.datetime,
**kwargs) -> DfLibName:
"""Retrieve data from a web source"""
_ = kwargs.pop('columns', None)
try:
logger.info(f'Making call to {source}. Start date: {start},'
f'End date: {end}')
df = pdr.DataReader(ticker, data_source=source, start=start,
end=end, **kwargs)
if df.empty:
logger.warning('df retrieved was empty.')
# the string should be ignored anyway
return DfLibName(df, lib_name=self.lib_name)
except RemoteDataError as e:
logger.warning(f'Error occurred getting data from {source}')
raise DataAccessError from e
else:
df = pd_utils.rename_bar_cols(df)
df[pd_utils.TICKER_COL] = ticker
if source == YAHOO:
# yahoo doesn't set the index :(
df = df.set_index([pd_utils.DATE_COL])
else:
df.index.name = pd_utils.DATE_COL
return DfLibName(df, lib_name=self.lib_name)
def _from_db(self,
ticker: str,
source: str,
start: dt.datetime,
end: dt.datetime,
filter_data: bool = True,
**kwargs) -> DfLibName:
"""
Try to read data from the DB.
:param ticker: The ticker to retrieve from the DB.
        :param source: Only used if there is not enough data in the DB.
:param start: The start of the range.
:param end: The end of the range.
:param filter_data: Passed to the read method.
:param kwargs: Passed to the read method.
:return: The data frame.
:raises: NoDataFoundException if no data is found for the given ticker.
"""
chunk_range = DateRange(start=start, end=end)
try:
logger.info(f'Checking DB for ticker: {ticker}')
df = self.lib.read(ticker, chunk_range=chunk_range,
filter_data=filter_data, **kwargs)
except NoDataFoundException as e:
raise DataAccessError(f'No data in DB for ticker: {ticker}') from e
except KeyError as e:
# TODO: open a bug report against arctic...
            logger.warning('KeyError thrown by Arctic: %s', e)
raise DataAccessError(
f'Error reading DB for ticker: {ticker}') from e
logger.debug(f'Found ticker: {ticker} in DB.')
db_start = dt_utils.parse_date(df.index.min(axis=1))
db_end = dt_utils.parse_date(df.index.max(axis=1))
# check that all the requested data is present
# TODO: deal with days that it is expected that data shouldn't exist.
if db_start > start and dt_utils.is_trade_day(start):
# db has less data than requested
lower_df_lib_name = self._from_web(ticker, source, start,
db_start - BDay())
lower_df = lower_df_lib_name.df
else:
lower_df = None
if db_end.date() < end.date() and dt_utils.is_trade_day(end):
            # db doesn't have as much data as requested
upper_df_lib_name = self._from_web(ticker, source, db_end, end)
upper_df = upper_df_lib_name.df
else:
upper_df = None
new_df = _concat_dfs(lower_df, upper_df, df)
return DfLibName(new_df, self.lib_name)
def get_symbols(self):
for s in self.lib.list_symbols():
yield s
def _concat_dfs(lower_df: pd.DataFrame,
upper_df: pd.DataFrame,
df: pd.DataFrame) -> pd.DataFrame:
"""
Helper method to concat the missing data frames, where `df` is the original
df.
"""
if lower_df is None and upper_df is None:
# everything is already in the df
return df
elif lower_df is not None and upper_df is None:
# missing only lower data
return pd.DataFrame(pd.concat([df, lower_df]))
elif lower_df is None and upper_df is not None:
# missing only upper data
return pd.DataFrame(pd.concat([df, upper_df]))
elif lower_df is not None and upper_df is not None:
# both missing
return pd.DataFrame(pd.concat([df, upper_df, lower_df]))
else:
return df
def load_from_csv(path: str,
start: dt.datetime = None,
end: dt.datetime = None) -> None:
"""
Load a list of tickers from a CSV, and download the data for the
requested period.
:param path: The path to the CSV file.
:param start: The start date to use for the data download.
:param end: The end date to use for the data download.
"""
|
|
from canon.mpi.init import *
import numpy as np
import os
import sys
from timeit import default_timer as timer
from itertools import groupby
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from canon.dat.datreader import read_dats, read_txt, idx2XY, blacklist
from canon.pattern.feature_extractor import AllPeaksExtractor, PeaksNumberExtractor, CombinedExtractor
from canon.pattern.model import GMMModel
from canon.pattern.labeler import SeqLabeler
read_file = read_dats
def split_workload(loads, nproc):
load_groups = [[] for _ in xrange(nproc)]
for i, l in enumerate(loads):
load_groups[i % len(load_groups)].append(l)
return load_groups
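# Illustrative example: split_workload([1, 2, 3, 4, 5], 2) deals the loads out
# round-robin and returns [[1, 3, 5], [2, 4]].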
def read_sample_patterns(dir_path, NX, step):
if MPI_RANK == 0:
t0 = timer()
filenames = [filename for filename in os.listdir(dir_path)]
logging.debug('Found %d files in the directory %s.' % (len(filenames), dir_path))
sample_files = []
for filename in filenames:
if filename in blacklist:
continue
X, Y = idx2XY(int(filename[-9:-4]), NX)
if X % step[0] == 0 and Y % step[1] == 0:
sample_files.append(os.path.join(dir_path, filename))
logging.debug('Selected %d sample files according to step size %s.' % (len(sample_files), step))
file_groups = split_workload(sample_files, MPI_COMM.size)
else:
file_groups = None
filenames = MPI_COMM.scatter(file_groups, root=0)
logging.debug('Assigned %d DAT files to read.' % len(filenames))
t0_loc = timer()
patterns = read_file(filenames)
logging.debug('Got %d [local] sample patterns. %g sec' % (len(patterns), timer() - t0_loc))
patterns = MPI_COMM.gather(patterns, root=0)
if MPI_RANK == 0:
patterns = [t for g in patterns for t in g]
logging.info('Gathered %d sample patterns in total. %g sec' % (len(patterns), timer() - t0))
return patterns
def split_patterns_mpi(patterns):
if MPI_RANK == 0:
pattern_groups = split_workload(patterns, MPI_COMM.size)
else:
pattern_groups = None
group = MPI_COMM.scatter(pattern_groups, root=0)
return group
def merge_fts_mpi(data_loc):
data_stacked = MPI_COMM.gather(data_loc, root=0)
if MPI_RANK == 0:
data_merged = [o for l in data_stacked for o in l]
logging.info('Gathered %d data points of %d features. Total size = %.2f MB' %
(len(data_merged), len(data_loc[0]), sys.getsizeof(data_merged) / (1024. ** 2)))
return data_merged
def extract_features(extractor, sample_patterns):
extractor = MPI_COMM.bcast(extractor, root=0)
patterns_loc = split_patterns_mpi(sample_patterns)
logging.debug('Assigned %d [local] patterns to extract features' % len(patterns_loc))
t0_loc = timer()
data_loc = map(extractor.features, patterns_loc)
logging.debug('Extracted %d features x %d [local] patterns. %g sec' %
(len(data_loc[0]), len(patterns_loc), timer() - t0_loc))
data = np.array(merge_fts_mpi(data_loc))
if MPI_RANK == 0:
logging.info('Extracted %d features x %d patterns. %g sec' %
(len(data[0]), len(sample_patterns), timer() - t0_loc))
return data
def score_dir(extractor, model, dir_path, limit=None, batch_size=100):
if MPI_RANK == 0:
filenames = [os.path.join(dir_path, filename) for filename in os.listdir(dir_path) if filename not in blacklist]
logging.debug('Found %d files in the directory %s.' % (len(filenames), dir_path))
limit = len(filenames) if limit is None else min(limit, len(filenames))
file_groups = split_workload(filenames[:limit], MPI_COMM.size)
else:
file_groups = None
filenames = MPI_COMM.scatter(file_groups, root=0)
logging.debug('Received %d files to score.' % len(filenames))
t0 = timer()
scoresinds_loc = score_files_loc(extractor, model, filenames, batch_size=batch_size)
scoresinds_stack = MPI_COMM.gather(scoresinds_loc, root=0)
if MPI_RANK == 0:
scoresinds = sum(scoresinds_stack, [])
logging.info('Scored %d patterns. %g sec' % (len(scoresinds), timer() - t0))
return scoresinds
def score_files_loc(extractor, model, filenames, batch_size=None):
t0 = timer()
if batch_size is None:
nbatches = 1
else:
nbatches = max(1, int(len(filenames) / batch_size))
file_batches = split_workload(filenames, nbatches)
logging.debug('Split files to be scored into %d batches.' % nbatches)
scoreinds = []
for file_batch in file_batches:
scoreinds += score_batch(extractor, model, file_batch)
logging.debug('Scored %d [local] patterns. %g sec' % (len(scoreinds), timer() - t0))
return scoreinds
def score_batch(extractor, model, filenames):
t0 = timer()
indices = [int(f[-9:-4]) for f in filenames]
patterns = read_file(filenames)
scores = model.score(np.array(map(extractor.features, patterns)))
logging.debug('Scored a batch of %d [local] patterns, %d are [None]. %g sec'
% (len(filenames), sum(1 for s in scores if s is None), timer() - t0))
return [(s, i) for (s, i) in zip(scores, indices) if s is not None]
def relabel(labeler, scoreinds):
t0 = timer()
# split scores into groups by label
if MPI_RANK == 0:
groups = []
for k, g in groupby(sorted(scoreinds, key=lambda si: si[0][0]), key=lambda si: si[0][0]):
groups.append(list(g))
sub_groups = split_workload(sorted(groups, key=len), MPI_COMM.size)
else:
        sub_groups = None
sub_groups = MPI_COMM.scatter(sub_groups, root=0)
logging.debug('Got %d [local] groups of scores to re-label, which is %d in total.' %
(len(sub_groups), sum(map(len, sub_groups))))
new_scoreinds_loc = relabel_score_groups(labeler, sub_groups)
new_scoreinds_stack = MPI_COMM.gather(new_scoreinds_loc, root=0)
if MPI_RANK == 0:
new_scoreinds = sum(new_scoreinds_stack, [])
logging.info('Re-labeled %d scores. %g sec' % (len(new_scoreinds), timer() - t0))
return new_scoreinds
def relabel_score_groups(labeler, groups):
t0 = timer()
new_scoreinds = []
for scoreinds in groups:
scoreinds = [si for si in scoreinds if si[0] is not None]
scoreinds = sorted(scoreinds, key=lambda si: si[0][1], reverse=True)
weighted_scores = []
for si in scoreinds[:min(len(scoreinds), 1000)]:
score = labeler.evaluate(si[1])
if score is not None:
weighted_scores.append((score, si[0][1]))
centroid_score = np.sum([s * w for s, w in weighted_scores])/np.sum([w for _, w in weighted_scores]) \
if len(weighted_scores) > 0 else None
new_scoreinds += [(centroid_score, si[1]) for si in scoreinds]
if centroid_score is None:
logging.warn('%d scores in cluster %d are re-labeled to [None]!' % (len(scoreinds), scoreinds[0][0][0]))
logging.info('Re-labeled %d [local] scores. %g sec' % (sum(map(len, groups)), timer() - t0))
return new_scoreinds
if __name__ == '__main__':
# Au30
read_file = read_dats
case_name = 'au31_area'
scratch = "/Users/sherrychen/scratch/"
dir_path = scratch + "peaks/dat/" + case_name
seq_files = [scratch + "seqfiles/" + f for f in ('au31_refined_a_.SEQ', 'au31_refined_.SEQ')] # 'm4fine_a.SEQ',
all_peaks_threshold = 0.8
# if MPI_RANK == 0:
# labeler = SeqLabeler(seq_files)
NX = 20
NY = 100
step = (5, 5)
    sample_patterns = read_sample_patterns(dir_path, NX, (2, 2))  # sample_patterns only lives on core-0
# # quartz
# case_name = 'quartz_500mpa'
# scratch = "/Users/sherrychen/scratch/"
# dir_path = scratch + "peaks/dat/" + case_name
# seq_files = (scratch + "seqfiles/" + 'Quartz_500Mpa_.SEQ', )
# NX = 120
# NY = 120
# step = (5, 5)
# all_peaks_threshold = 0.9
    # sample_patterns = read_sample_patterns(dir_path, NX, (4, 4)) # sample_patterns only lives on core-0
if MPI_RANK == 0:
t0 = timer()
extractor1 = AllPeaksExtractor(sample_patterns, intensity_threshold=all_peaks_threshold, gaussion_height=1, gaussian_width=5)
extractor2 = PeaksNumberExtractor(intensity_threshold=0.0)
extractor = CombinedExtractor([extractor2, extractor1])
# extractor = extractor2
logging.info("Constructed a feature extractor. %g sec" % (timer() - t0))
else:
extractor = None
data = extract_features(extractor, sample_patterns)
extractor = MPI_COMM.bcast(extractor, root=0)
if MPI_RANK == 0:
model = GMMModel()
model.train(np.array(data), preprocessors=[StandardScaler()])
# model.train(np.array(data), preprocessors=[StandardScaler(), PCA(whiten=True)])
else:
model = None
model = MPI_COMM.bcast(model, root=0)
scoreinds = score_dir(extractor, model, dir_path, limit=None, batch_size=200)
if MPI_RANK == 0:
labeler = SeqLabeler(seq_files)
else:
labeler = None
labeler = MPI_COMM.bcast(labeler, root=0)
scoreinds = relabel(labeler, scoreinds)
if MPI_RANK == 0:
Z = np.empty([NY, NX])
Z[:] = np.nan
for score, idx in scoreinds:
if score is not None:
ix, iy = idx2XY(idx, NX)
if ix < NY:
Z[ix, iy] = score
logging.debug('Z matrix has %d nans' % sum(1 for row in Z for z in row if np.isnan(z)))
np.savetxt('Z_au31.txt', Z)
logging.info('Write Z matrix into Z_au31.txt in ' + os.path.dirname(os.path.abspath(__file__)))
from plotseq import plot_seq
# # Z = np.loadtxt('Z.txt')
plot_seq(Z, step, colormap='jet', filename=scratch + "img/clustering_" + case_name)
|
|
from __future__ import unicode_literals
import django
from django import forms
from django.conf import settings
from guardian.compat import url, patterns
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext, ugettext_lazy as _
from guardian.compat import get_user_model, get_model_name
from guardian.forms import UserObjectPermissionsForm
from guardian.forms import GroupObjectPermissionsForm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_perms_for_model
from guardian.models import Group
class AdminUserObjectPermissionsForm(UserObjectPermissionsForm):
"""
    Extends :form:`UserObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns the
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
"""
def get_obj_perms_field_widget(self):
return FilteredSelectMultiple(_("Permissions"), False)
class AdminGroupObjectPermissionsForm(GroupObjectPermissionsForm):
"""
    Extends :form:`GroupObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns the
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
"""
def get_obj_perms_field_widget(self):
return FilteredSelectMultiple(_("Permissions"), False)
class GuardedModelAdminMixin(object):
"""
Serves as a helper for custom subclassing ``admin.ModelAdmin``.
"""
change_form_template = \
'admin/guardian/model/change_form.html'
obj_perms_manage_template = \
'admin/guardian/model/obj_perms_manage.html'
obj_perms_manage_user_template = \
'admin/guardian/model/obj_perms_manage_user.html'
obj_perms_manage_group_template = \
'admin/guardian/model/obj_perms_manage_group.html'
user_can_access_owned_objects_only = False
user_owned_objects_field = 'user'
user_can_access_owned_by_group_objects_only = False
group_owned_objects_field = 'group'
include_object_permissions_urls = True
def get_queryset(self, request):
# Prefer the Django >= 1.6 interface but maintain
# backward compatibility
method = getattr(
super(GuardedModelAdminMixin, self), 'get_queryset',
getattr(super(GuardedModelAdminMixin, self), 'queryset', None))
qs = method(request)
if request.user.is_superuser:
return qs
if self.user_can_access_owned_objects_only:
filters = {self.user_owned_objects_field: request.user}
qs = qs.filter(**filters)
if self.user_can_access_owned_by_group_objects_only:
User = get_user_model()
user_rel_name = User.groups.field.related_query_name()
qs_key = '%s__%s' % (self.group_owned_objects_field, user_rel_name)
filters = {qs_key: request.user}
qs = qs.filter(**filters)
return qs
# Allow queryset method as fallback for Django versions < 1.6
# for versions >= 1.6 this is taken care of by Django itself
# and triggers a warning message automatically.
if django.VERSION < (1, 6):
queryset = get_queryset
def get_urls(self):
"""
Extends standard admin model urls with the following:
        - ``.../permissions/`` under ``app_model_permissions`` url name (params: object_pk)
- ``.../permissions/user-manage/<user_id>/`` under ``app_model_permissions_manage_user`` url name (params: object_pk, user_pk)
- ``.../permissions/group-manage/<group_id>/`` under ``app_model_permissions_manage_group`` url name (params: object_pk, group_pk)
.. note::
``...`` above are standard, instance detail url (i.e.
``/admin/flatpages/1/``)
"""
urls = super(GuardedModelAdminMixin, self).get_urls()
if self.include_object_permissions_urls:
info = self.model._meta.app_label, get_model_name(self.model)
myurls = patterns('',
url(r'^(?P<object_pk>.+)/permissions/$',
view=self.admin_site.admin_view(self.obj_perms_manage_view),
name='%s_%s_permissions' % info),
url(r'^(?P<object_pk>.+)/permissions/user-manage/(?P<user_id>\-?\d+)/$',
view=self.admin_site.admin_view(
self.obj_perms_manage_user_view),
name='%s_%s_permissions_manage_user' % info),
url(r'^(?P<object_pk>.+)/permissions/group-manage/(?P<group_id>\-?\d+)/$',
view=self.admin_site.admin_view(
self.obj_perms_manage_group_view),
name='%s_%s_permissions_manage_group' % info),
)
urls = myurls + urls
return urls
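    # Usage sketch (assumes the default 'admin' site namespace and a
    # hypothetical flatpages registration): the object permissions view added
    # above can be reversed as
    #   reverse('admin:flatpages_flatpage_permissions', args=[obj.pk])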
def get_obj_perms_base_context(self, request, obj):
"""
Returns context dictionary with common admin and object permissions
related content.
"""
context = {
'adminform': {'model_admin': self},
'media': self.media,
'object': obj,
'app_label': self.model._meta.app_label,
'opts': self.model._meta,
'original': hasattr(obj, '__unicode__') and obj.__unicode__() or\
str(obj),
'has_change_permission': self.has_change_permission(request, obj),
'model_perms': get_perms_for_model(obj),
'title': _("Object permissions"),
}
return context
def obj_perms_manage_view(self, request, object_pk):
"""
Main object permissions view. Presents all users and groups with any
object permissions for the current model *instance*. Users or groups
without object permissions for related *instance* would **not** be
shown. In order to add or manage user or group one should use links or
forms presented within the page.
"""
obj = get_object_or_404(self.get_queryset(request), pk=object_pk)
users_perms = SortedDict(
get_users_with_perms(obj, attach_perms=True,
with_group_users=False))
users_perms.keyOrder.sort(key=lambda user:
getattr(user, get_user_model().USERNAME_FIELD))
groups_perms = SortedDict(
get_groups_with_perms(obj, attach_perms=True))
groups_perms.keyOrder.sort(key=lambda group: group.name)
if request.method == 'POST' and 'submit_manage_user' in request.POST:
user_form = UserManage(request.POST)
group_form = GroupManage()
info = (
self.admin_site.name,
self.model._meta.app_label,
get_model_name(self.model)
)
if user_form.is_valid():
user_id = user_form.cleaned_data['user'].pk
url = reverse(
'%s:%s_%s_permissions_manage_user' % info,
args=[obj.pk, user_id]
)
return redirect(url)
elif request.method == 'POST' and 'submit_manage_group' in request.POST:
user_form = UserManage()
group_form = GroupManage(request.POST)
info = (
self.admin_site.name,
self.model._meta.app_label,
get_model_name(self.model)
)
if group_form.is_valid():
group_id = group_form.cleaned_data['group'].id
url = reverse(
'%s:%s_%s_permissions_manage_group' % info,
args=[obj.pk, group_id]
)
return redirect(url)
else:
user_form = UserManage()
group_form = GroupManage()
context = self.get_obj_perms_base_context(request, obj)
context['users_perms'] = users_perms
context['groups_perms'] = groups_perms
context['user_form'] = user_form
context['group_form'] = group_form
return render_to_response(self.get_obj_perms_manage_template(),
context, RequestContext(request, current_app=self.admin_site.name))
def get_obj_perms_manage_template(self):
"""
Returns main object permissions admin template. May be overridden if
need to change it dynamically.
.. note::
If ``INSTALLED_APPS`` contains ``grappelli`` this function would
return ``"admin/guardian/grappelli/obj_perms_manage.html"``.
"""
if 'grappelli' in settings.INSTALLED_APPS:
return 'admin/guardian/contrib/grappelli/obj_perms_manage.html'
return self.obj_perms_manage_template
def obj_perms_manage_user_view(self, request, object_pk, user_id):
"""
Manages selected users' permissions for current object.
"""
user = get_object_or_404(get_user_model(), pk=user_id)
obj = get_object_or_404(self.get_queryset(request), pk=object_pk)
form_class = self.get_obj_perms_manage_user_form()
form = form_class(user, obj, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save_obj_perms()
msg = ugettext("Permissions saved.")
messages.success(request, msg)
info = (
self.admin_site.name,
self.model._meta.app_label,
get_model_name(self.model)
)
url = reverse(
'%s:%s_%s_permissions_manage_user' % info,
args=[obj.pk, user.pk]
)
return redirect(url)
context = self.get_obj_perms_base_context(request, obj)
context['user_obj'] = user
context['user_perms'] = get_perms(user, obj)
context['form'] = form
return render_to_response(self.get_obj_perms_manage_user_template(),
context, RequestContext(request, current_app=self.admin_site.name))
def get_obj_perms_manage_user_template(self):
"""
Returns object permissions for user admin template. May be overridden
if need to change it dynamically.
.. note::
If ``INSTALLED_APPS`` contains ``grappelli`` this function would
return ``"admin/guardian/grappelli/obj_perms_manage_user.html"``.
"""
if 'grappelli' in settings.INSTALLED_APPS:
return 'admin/guardian/contrib/grappelli/obj_perms_manage_user.html'
return self.obj_perms_manage_user_template
def get_obj_perms_manage_user_form(self):
"""
Returns form class for user object permissions management. By default
:form:`AdminUserObjectPermissionsForm` is returned.
"""
return AdminUserObjectPermissionsForm
def obj_perms_manage_group_view(self, request, object_pk, group_id):
"""
Manages selected groups' permissions for current object.
"""
group = get_object_or_404(Group, id=group_id)
obj = get_object_or_404(self.get_queryset(request), pk=object_pk)
form_class = self.get_obj_perms_manage_group_form()
form = form_class(group, obj, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save_obj_perms()
msg = ugettext("Permissions saved.")
messages.success(request, msg)
info = (
self.admin_site.name,
self.model._meta.app_label,
get_model_name(self.model)
)
url = reverse(
'%s:%s_%s_permissions_manage_group' % info,
args=[obj.pk, group.id]
)
return redirect(url)
context = self.get_obj_perms_base_context(request, obj)
context['group_obj'] = group
context['group_perms'] = get_perms(group, obj)
context['form'] = form
return render_to_response(self.get_obj_perms_manage_group_template(),
context, RequestContext(request, current_app=self.admin_site.name))
def get_obj_perms_manage_group_template(self):
"""
Returns object permissions for group admin template. May be overridden
if need to change it dynamically.
.. note::
If ``INSTALLED_APPS`` contains ``grappelli`` this function would
return ``"admin/guardian/grappelli/obj_perms_manage_group.html"``.
"""
if 'grappelli' in settings.INSTALLED_APPS:
return 'admin/guardian/contrib/grappelli/obj_perms_manage_group.html'
return self.obj_perms_manage_group_template
def get_obj_perms_manage_group_form(self):
"""
Returns form class for group object permissions management. By default
:form:`AdminGroupObjectPermissionsForm` is returned.
"""
return AdminGroupObjectPermissionsForm
class GuardedModelAdmin(GuardedModelAdminMixin, admin.ModelAdmin):
"""
Extends ``django.contrib.admin.ModelAdmin`` class. Provides some extra
views for object permissions management at admin panel. It also changes
default ``change_form_template`` option to
``'admin/guardian/model/change_form.html'`` which is required for proper
url (object permissions related) being shown at the model pages.
**Extra options**
``GuardedModelAdmin.obj_perms_manage_template``
*Default*: ``admin/guardian/model/obj_perms_manage.html``
``GuardedModelAdmin.obj_perms_manage_user_template``
*Default*: ``admin/guardian/model/obj_perms_manage_user.html``
``GuardedModelAdmin.obj_perms_manage_group_template``
*Default*: ``admin/guardian/model/obj_perms_manage_group.html``
``GuardedModelAdmin.user_can_access_owned_objects_only``
*Default*: ``False``
        If set to ``True``, ``request.user`` is used to filter out objects
        he or she doesn't own (checking the ``user`` field of the model;
        the field name may be overridden by the
        ``user_owned_objects_field`` option).
.. note::
Please remember that this will **NOT** affect superusers!
Admins would still see all items.
``GuardedModelAdmin.user_can_access_owned_by_group_objects_only``
*Default*: ``False``
        If set to ``True``, ``request.user`` is used to filter out objects
        that none of his or her groups own (checking whether any group the
        user belongs to is set as the ``group`` field of the object; the
        name of the field can be changed by overriding ``group_owned_objects_field``).
.. note::
Please remember that this will **NOT** affect superusers!
Admins would still see all items.
``GuardedModelAdmin.group_owned_objects_field``
*Default*: ``group``
``GuardedModelAdmin.include_object_permissions_urls``
*Default*: ``True``
.. versionadded:: 1.2
Might be set to ``False`` in order **NOT** to include guardian-specific
urls.
**Usage example**
Just use :admin:`GuardedModelAdmin` instead of
``django.contrib.admin.ModelAdmin``.
.. code-block:: python
from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from myapp.models import Author
class AuthorAdmin(GuardedModelAdmin):
pass
admin.site.register(Author, AuthorAdmin)
"""
class UserManage(forms.Form):
user = forms.CharField(label=_("User identification"),
max_length=200,
error_messages = {'does_not_exist': _("This user does not exist")},
help_text=_('Enter a value compatible with User.USERNAME_FIELD')
)
def clean_user(self):
"""
Returns ``User`` instance based on the given identification.
"""
identification = self.cleaned_data['user']
user_model = get_user_model()
try:
username_field = user_model.USERNAME_FIELD
except AttributeError:
username_field = 'username'
try:
user = user_model.objects.get(**{username_field: identification})
return user
except user_model.DoesNotExist:
raise forms.ValidationError(
self.fields['user'].error_messages['does_not_exist'])
class GroupManage(forms.Form):
group = forms.CharField(max_length=80, error_messages={'does_not_exist':
_("This group does not exist")})
def clean_group(self):
"""
Returns ``Group`` instance based on the given group name.
"""
name = self.cleaned_data['group']
try:
group = Group.objects.get(name=name)
return group
except Group.DoesNotExist:
raise forms.ValidationError(
self.fields['group'].error_messages['does_not_exist'])
|
|
# Generated by Snowball 2.1.0 - https://snowballstem.org/
from .basestemmer import BaseStemmer
from .among import Among
class EnglishStemmer(BaseStemmer):
'''
This class implements the stemming algorithm defined by a snowball script.
Generated by Snowball 2.1.0 - https://snowballstem.org/
'''
a_0 = [
Among(u"arsen", -1, -1),
Among(u"commun", -1, -1),
Among(u"gener", -1, -1)
]
a_1 = [
Among(u"'", -1, 1),
Among(u"'s'", 0, 1),
Among(u"'s", -1, 1)
]
a_2 = [
Among(u"ied", -1, 2),
Among(u"s", -1, 3),
Among(u"ies", 1, 2),
Among(u"sses", 1, 1),
Among(u"ss", 1, -1),
Among(u"us", 1, -1)
]
a_3 = [
Among(u"", -1, 3),
Among(u"bb", 0, 2),
Among(u"dd", 0, 2),
Among(u"ff", 0, 2),
Among(u"gg", 0, 2),
Among(u"bl", 0, 1),
Among(u"mm", 0, 2),
Among(u"nn", 0, 2),
Among(u"pp", 0, 2),
Among(u"rr", 0, 2),
Among(u"at", 0, 1),
Among(u"tt", 0, 2),
Among(u"iz", 0, 1)
]
a_4 = [
Among(u"ed", -1, 2),
Among(u"eed", 0, 1),
Among(u"ing", -1, 2),
Among(u"edly", -1, 2),
Among(u"eedly", 3, 1),
Among(u"ingly", -1, 2)
]
a_5 = [
Among(u"anci", -1, 3),
Among(u"enci", -1, 2),
Among(u"ogi", -1, 13),
Among(u"li", -1, 15),
Among(u"bli", 3, 12),
Among(u"abli", 4, 4),
Among(u"alli", 3, 8),
Among(u"fulli", 3, 9),
Among(u"lessli", 3, 14),
Among(u"ousli", 3, 10),
Among(u"entli", 3, 5),
Among(u"aliti", -1, 8),
Among(u"biliti", -1, 12),
Among(u"iviti", -1, 11),
Among(u"tional", -1, 1),
Among(u"ational", 14, 7),
Among(u"alism", -1, 8),
Among(u"ation", -1, 7),
Among(u"ization", 17, 6),
Among(u"izer", -1, 6),
Among(u"ator", -1, 7),
Among(u"iveness", -1, 11),
Among(u"fulness", -1, 9),
Among(u"ousness", -1, 10)
]
a_6 = [
Among(u"icate", -1, 4),
Among(u"ative", -1, 6),
Among(u"alize", -1, 3),
Among(u"iciti", -1, 4),
Among(u"ical", -1, 4),
Among(u"tional", -1, 1),
Among(u"ational", 5, 2),
Among(u"ful", -1, 5),
Among(u"ness", -1, 5)
]
a_7 = [
Among(u"ic", -1, 1),
Among(u"ance", -1, 1),
Among(u"ence", -1, 1),
Among(u"able", -1, 1),
Among(u"ible", -1, 1),
Among(u"ate", -1, 1),
Among(u"ive", -1, 1),
Among(u"ize", -1, 1),
Among(u"iti", -1, 1),
Among(u"al", -1, 1),
Among(u"ism", -1, 1),
Among(u"ion", -1, 2),
Among(u"er", -1, 1),
Among(u"ous", -1, 1),
Among(u"ant", -1, 1),
Among(u"ent", -1, 1),
Among(u"ment", 15, 1),
Among(u"ement", 16, 1)
]
a_8 = [
Among(u"e", -1, 1),
Among(u"l", -1, 2)
]
a_9 = [
Among(u"succeed", -1, -1),
Among(u"proceed", -1, -1),
Among(u"exceed", -1, -1),
Among(u"canning", -1, -1),
Among(u"inning", -1, -1),
Among(u"earring", -1, -1),
Among(u"herring", -1, -1),
Among(u"outing", -1, -1)
]
a_10 = [
Among(u"andes", -1, -1),
Among(u"atlas", -1, -1),
Among(u"bias", -1, -1),
Among(u"cosmos", -1, -1),
Among(u"dying", -1, 3),
Among(u"early", -1, 9),
Among(u"gently", -1, 7),
Among(u"howe", -1, -1),
Among(u"idly", -1, 6),
Among(u"lying", -1, 4),
Among(u"news", -1, -1),
Among(u"only", -1, 10),
Among(u"singly", -1, 11),
Among(u"skies", -1, 2),
Among(u"skis", -1, 1),
Among(u"sky", -1, -1),
Among(u"tying", -1, 5),
Among(u"ugly", -1, 8)
]
g_v = [17, 65, 16, 1]
g_v_WXY = [1, 17, 65, 208, 1]
g_valid_LI = [55, 141, 2]
B_Y_found = False
I_p2 = 0
I_p1 = 0
def __r_prelude(self):
self.B_Y_found = False
v_1 = self.cursor
try:
self.bra = self.cursor
if not self.eq_s(u"'"):
raise lab0()
self.ket = self.cursor
if not self.slice_del():
return False
except lab0: pass
self.cursor = v_1
v_2 = self.cursor
try:
self.bra = self.cursor
if not self.eq_s(u"y"):
raise lab1()
self.ket = self.cursor
if not self.slice_from(u"Y"):
return False
self.B_Y_found = True
except lab1: pass
self.cursor = v_2
v_3 = self.cursor
try:
while True:
v_4 = self.cursor
try:
try:
while True:
v_5 = self.cursor
try:
if not self.in_grouping(EnglishStemmer.g_v, 97, 121):
raise lab5()
self.bra = self.cursor
if not self.eq_s(u"y"):
raise lab5()
self.ket = self.cursor
self.cursor = v_5
raise lab4()
except lab5: pass
self.cursor = v_5
if self.cursor >= self.limit:
raise lab3()
self.cursor += 1
except lab4: pass
if not self.slice_from(u"Y"):
return False
self.B_Y_found = True
continue
except lab3: pass
self.cursor = v_4
break
except lab2: pass
self.cursor = v_3
return True
def __r_mark_regions(self):
self.I_p1 = self.limit
self.I_p2 = self.limit
v_1 = self.cursor
try:
try:
v_2 = self.cursor
try:
if self.find_among(EnglishStemmer.a_0) == 0:
raise lab2()
raise lab1()
except lab2: pass
self.cursor = v_2
if not self.go_out_grouping(EnglishStemmer.g_v, 97, 121):
raise lab0()
self.cursor += 1
if not self.go_in_grouping(EnglishStemmer.g_v, 97, 121):
raise lab0()
self.cursor += 1
except lab1: pass
self.I_p1 = self.cursor
if not self.go_out_grouping(EnglishStemmer.g_v, 97, 121):
raise lab0()
self.cursor += 1
if not self.go_in_grouping(EnglishStemmer.g_v, 97, 121):
raise lab0()
self.cursor += 1
self.I_p2 = self.cursor
except lab0: pass
self.cursor = v_1
return True
def __r_shortv(self):
try:
v_1 = self.limit - self.cursor
try:
if not self.out_grouping_b(EnglishStemmer.g_v_WXY, 89, 121):
raise lab1()
if not self.in_grouping_b(EnglishStemmer.g_v, 97, 121):
raise lab1()
if not self.out_grouping_b(EnglishStemmer.g_v, 97, 121):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
if not self.out_grouping_b(EnglishStemmer.g_v, 97, 121):
return False
if not self.in_grouping_b(EnglishStemmer.g_v, 97, 121):
return False
if self.cursor > self.limit_backward:
return False
except lab0: pass
return True
def __r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def __r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def __r_Step_1a(self):
v_1 = self.limit - self.cursor
try:
self.ket = self.cursor
if self.find_among_b(EnglishStemmer.a_1) == 0:
self.cursor = self.limit - v_1
raise lab0()
self.bra = self.cursor
if not self.slice_del():
return False
except lab0: pass
self.ket = self.cursor
among_var = self.find_among_b(EnglishStemmer.a_2)
if among_var == 0:
return False
self.bra = self.cursor
if among_var == 1:
if not self.slice_from(u"ss"):
return False
elif among_var == 2:
try:
v_2 = self.limit - self.cursor
try:
c = self.cursor - 2
if c < self.limit_backward:
raise lab2()
self.cursor = c
if not self.slice_from(u"i"):
return False
raise lab1()
except lab2: pass
self.cursor = self.limit - v_2
if not self.slice_from(u"ie"):
return False
except lab1: pass
elif among_var == 3:
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
if not self.go_out_grouping_b(EnglishStemmer.g_v, 97, 121):
return False
self.cursor -= 1
if not self.slice_del():
return False
return True
def __r_Step_1b(self):
self.ket = self.cursor
among_var = self.find_among_b(EnglishStemmer.a_4)
if among_var == 0:
return False
self.bra = self.cursor
if among_var == 1:
if not self.__r_R1():
return False
if not self.slice_from(u"ee"):
return False
else:
v_1 = self.limit - self.cursor
if not self.go_out_grouping_b(EnglishStemmer.g_v, 97, 121):
return False
self.cursor -= 1
self.cursor = self.limit - v_1
if not self.slice_del():
return False
v_2 = self.limit - self.cursor
among_var = self.find_among_b(EnglishStemmer.a_3)
if among_var == 0:
return False
self.cursor = self.limit - v_2
if among_var == 1:
c = self.cursor
self.insert(self.cursor, self.cursor, u"e")
self.cursor = c
elif among_var == 2:
self.ket = self.cursor
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
self.bra = self.cursor
if not self.slice_del():
return False
else:
if self.cursor != self.I_p1:
return False
v_3 = self.limit - self.cursor
if not self.__r_shortv():
return False
self.cursor = self.limit - v_3
c = self.cursor
self.insert(self.cursor, self.cursor, u"e")
self.cursor = c
return True
def __r_Step_1c(self):
self.ket = self.cursor
try:
v_1 = self.limit - self.cursor
try:
if not self.eq_s_b(u"y"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
if not self.eq_s_b(u"Y"):
return False
except lab0: pass
self.bra = self.cursor
if not self.out_grouping_b(EnglishStemmer.g_v, 97, 121):
return False
try:
if self.cursor > self.limit_backward:
raise lab2()
return False
except lab2: pass
if not self.slice_from(u"i"):
return False
return True
def __r_Step_2(self):
self.ket = self.cursor
among_var = self.find_among_b(EnglishStemmer.a_5)
if among_var == 0:
return False
self.bra = self.cursor
if not self.__r_R1():
return False
if among_var == 1:
if not self.slice_from(u"tion"):
return False
elif among_var == 2:
if not self.slice_from(u"ence"):
return False
elif among_var == 3:
if not self.slice_from(u"ance"):
return False
elif among_var == 4:
if not self.slice_from(u"able"):
return False
elif among_var == 5:
if not self.slice_from(u"ent"):
return False
elif among_var == 6:
if not self.slice_from(u"ize"):
return False
elif among_var == 7:
if not self.slice_from(u"ate"):
return False
elif among_var == 8:
if not self.slice_from(u"al"):
return False
elif among_var == 9:
if not self.slice_from(u"ful"):
return False
elif among_var == 10:
if not self.slice_from(u"ous"):
return False
elif among_var == 11:
if not self.slice_from(u"ive"):
return False
elif among_var == 12:
if not self.slice_from(u"ble"):
return False
elif among_var == 13:
if not self.eq_s_b(u"l"):
return False
if not self.slice_from(u"og"):
return False
elif among_var == 14:
if not self.slice_from(u"less"):
return False
else:
if not self.in_grouping_b(EnglishStemmer.g_valid_LI, 99, 116):
return False
if not self.slice_del():
return False
return True
def __r_Step_3(self):
self.ket = self.cursor
among_var = self.find_among_b(EnglishStemmer.a_6)
if among_var == 0:
return False
self.bra = self.cursor
if not self.__r_R1():
return False
if among_var == 1:
if not self.slice_from(u"tion"):
return False
elif among_var == 2:
if not self.slice_from(u"ate"):
return False
elif among_var == 3:
if not self.slice_from(u"al"):
return False
elif among_var == 4:
if not self.slice_from(u"ic"):
return False
elif among_var == 5:
if not self.slice_del():
return False
else:
if not self.__r_R2():
return False
if not self.slice_del():
return False
return True
def __r_Step_4(self):
self.ket = self.cursor
among_var = self.find_among_b(EnglishStemmer.a_7)
if among_var == 0:
return False
self.bra = self.cursor
if not self.__r_R2():
return False
if among_var == 1:
if not self.slice_del():
return False
else:
try:
v_1 = self.limit - self.cursor
try:
if not self.eq_s_b(u"s"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
if not self.eq_s_b(u"t"):
return False
except lab0: pass
if not self.slice_del():
return False
return True
def __r_Step_5(self):
self.ket = self.cursor
among_var = self.find_among_b(EnglishStemmer.a_8)
if among_var == 0:
return False
self.bra = self.cursor
if among_var == 1:
try:
v_1 = self.limit - self.cursor
try:
if not self.__r_R2():
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
if not self.__r_R1():
return False
v_2 = self.limit - self.cursor
try:
if not self.__r_shortv():
raise lab2()
return False
except lab2: pass
self.cursor = self.limit - v_2
except lab0: pass
if not self.slice_del():
return False
else:
if not self.__r_R2():
return False
if not self.eq_s_b(u"l"):
return False
if not self.slice_del():
return False
return True
def __r_exception2(self):
self.ket = self.cursor
if self.find_among_b(EnglishStemmer.a_9) == 0:
return False
self.bra = self.cursor
if self.cursor > self.limit_backward:
return False
return True
def __r_exception1(self):
self.bra = self.cursor
among_var = self.find_among(EnglishStemmer.a_10)
if among_var == 0:
return False
self.ket = self.cursor
if self.cursor < self.limit:
return False
if among_var == 1:
if not self.slice_from(u"ski"):
return False
elif among_var == 2:
if not self.slice_from(u"sky"):
return False
elif among_var == 3:
if not self.slice_from(u"die"):
return False
elif among_var == 4:
if not self.slice_from(u"lie"):
return False
elif among_var == 5:
if not self.slice_from(u"tie"):
return False
elif among_var == 6:
if not self.slice_from(u"idl"):
return False
elif among_var == 7:
if not self.slice_from(u"gentl"):
return False
elif among_var == 8:
if not self.slice_from(u"ugli"):
return False
elif among_var == 9:
if not self.slice_from(u"earli"):
return False
elif among_var == 10:
if not self.slice_from(u"onli"):
return False
elif among_var == 11:
if not self.slice_from(u"singl"):
return False
return True
def __r_postlude(self):
if not self.B_Y_found:
return False
while True:
v_1 = self.cursor
try:
try:
while True:
v_2 = self.cursor
try:
self.bra = self.cursor
if not self.eq_s(u"Y"):
raise lab2()
self.ket = self.cursor
self.cursor = v_2
raise lab1()
except lab2: pass
self.cursor = v_2
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab1: pass
if not self.slice_from(u"y"):
return False
continue
except lab0: pass
self.cursor = v_1
break
return True
def _stem(self):
try:
v_1 = self.cursor
try:
if not self.__r_exception1():
raise lab1()
raise lab0()
except lab1: pass
self.cursor = v_1
try:
v_2 = self.cursor
try:
c = self.cursor + 3
if c > self.limit:
raise lab3()
self.cursor = c
raise lab2()
except lab3: pass
self.cursor = v_2
raise lab0()
except lab2: pass
self.cursor = v_1
self.__r_prelude()
self.__r_mark_regions()
self.limit_backward = self.cursor
self.cursor = self.limit
v_5 = self.limit - self.cursor
self.__r_Step_1a()
self.cursor = self.limit - v_5
try:
v_6 = self.limit - self.cursor
try:
if not self.__r_exception2():
raise lab5()
raise lab4()
except lab5: pass
self.cursor = self.limit - v_6
v_7 = self.limit - self.cursor
self.__r_Step_1b()
self.cursor = self.limit - v_7
v_8 = self.limit - self.cursor
self.__r_Step_1c()
self.cursor = self.limit - v_8
v_9 = self.limit - self.cursor
self.__r_Step_2()
self.cursor = self.limit - v_9
v_10 = self.limit - self.cursor
self.__r_Step_3()
self.cursor = self.limit - v_10
v_11 = self.limit - self.cursor
self.__r_Step_4()
self.cursor = self.limit - v_11
v_12 = self.limit - self.cursor
self.__r_Step_5()
self.cursor = self.limit - v_12
except lab4: pass
self.cursor = self.limit_backward
v_13 = self.cursor
self.__r_postlude()
self.cursor = v_13
except lab0: pass
return True
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
|
|
"""TestSuite"""
import sys
import unittest
from django.utils.unittest import case, util
__unittest = True
class BaseTestSuite(unittest.TestSuite):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
def __init__(self, tests=()):
self._tests = []
self.addTests(tests)
def __repr__(self):
return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __ne__(self, other):
return not self == other
# Can't guarantee hash invariant, so flag as unhashable
__hash__ = None
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = 0
for test in self:
cases += test.countTestCases()
return cases
def addTest(self, test):
# sanity checks
if not hasattr(test, '__call__'):
raise TypeError("%r is not callable" % (repr(test),))
if isinstance(test, type) and issubclass(test,
(case.TestCase, TestSuite)):
raise TypeError("TestCases and TestSuites must be instantiated "
"before passing them to addTest()")
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, basestring):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def run(self, result):
for test in self:
if result.shouldStop:
break
test(result)
return result
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result):
self._wrapped_run(result)
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
return result
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self._wrapped_run(debug, True)
self._tearDownPreviousClass(None, debug)
self._handleModuleTearDown(debug)
################################
# private methods
def _wrapped_run(self, result, debug=False):
for test in self:
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if hasattr(test, '_wrapped_run'):
test._wrapped_run(result, debug)
elif not debug:
test(result)
else:
test.debug()
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
            # test may actually be a function,
            # so its class will be a built-in type
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
try:
setUpClass()
except Exception, e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
try:
setUpModule()
except Exception, e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
try:
tearDownModule()
except Exception, e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
if getattr(previousClass, "__unittest_skip__", False):
return
tearDownClass = getattr(previousClass, 'tearDownClass', None)
if tearDownClass is not None:
try:
tearDownClass()
except Exception, e:
if isinstance(result, _DebugResult):
raise
className = util.strclass(previousClass)
errorName = 'tearDownClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
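# Illustrative usage sketch (not part of the original module; MyTests is a
# hypothetical TestCase subclass): build a suite and hand it to a runner, as
# the TestSuite docstring describes.
#
#     import unittest
#     suite = TestSuite()
#     suite.addTests(
#         unittest.defaultTestLoader.loadTestsFromTestCase(MyTests))
#     unittest.TextTestRunner(verbosity=2).run(suite)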
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
|
|
import sublime
import sublime_plugin
import os
import re
from . import context
from . import util
from . import processor
from .salesforce.lib.panel import Printer
class OpenLightningDocReferences(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(OpenLightningDocReferences, self).__init__(*args, **kwargs)
def run(self):
instance = util.get_instance(self.settings)
if instance == "emea": instance = "eu0"
start_url = "https://%s.lightning.force.com/docs/component-library" % instance
self.window.run_command("login_to_sfdc", {"startURL": start_url})
def is_enabled(self):
self.settings = context.get_settings()
metadata = util.get_described_metadata(self.settings)
if not metadata:
return False
return True
class DeployLightningToServer(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(DeployLightningToServer, self).__init__(*args, **kwargs)
self.meta_type = ''
def run(self, dirs, switch_project=True, source_org=None, element=None, update_meta=False):
if switch_project:
return self.window.run_command("switch_project", {
"callback_options": {
"callback_command": "deploy_lightning_to_server",
"args": {
"switch_project": False,
"source_org": self.settings["default_project_name"],
"dirs": dirs,
"element": element,
"update_meta": update_meta
}
}
})
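        # When invoked with switch_project=True, the command first switches to
        # the chosen project via "switch_project", which calls back into
        # deploy_lightning_to_server with switch_project=False; execution then
        # continues below to package and deploy the selected bundles.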
base64_package = util.build_lightning_package(dirs, meta_type=self.meta_type)
processor.handle_deploy_thread(
base64_package,
source_org=source_org,
element=element,
update_meta=update_meta
)
def is_visible(self, dirs, switch_project=True):
        if not dirs:
return False
self.settings = context.get_settings()
for _dir in dirs:
attributes = util.get_file_attributes(_dir)
meta_folder = attributes["metadata_folder"]
if meta_folder not in ["aura", "lwc"]:
return False
self.meta_type = 'AuraDefinitionBundle' if meta_folder == 'aura' else 'LightningComponentBundle'
if self.settings["default_project_name"] not in _dir:
return False
return True
class PreviewLightningAppInServer(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(PreviewLightningAppInServer, self).__init__(*args, **kwargs)
def run(self, app_name=None):
if app_name:
return self.preview_app(app_name)
# Get all available apps to preview in the local aura path
aura_dir = os.path.join(self.settings["workspace"], "src", "aura")
self.app_names = []
for dirpath, dirnames, filenames in os.walk(aura_dir):
for filename in filenames:
attributes = util.get_file_attributes(filename)
if attributes["extension"] == "app":
self.app_names.append(attributes["name"])
self.app_names = sorted(self.app_names)
        # Check whether there is any available app to preview
if not self.app_names:
return Printer.get("error").write("No available app to preview")
self.window.show_quick_panel(self.app_names, self.on_chosen)
def on_chosen(self, index):
if index == -1: return
self.preview_app(self.app_names[index])
def preview_app(self, app_name):
instance = util.get_instance(self.settings)
start_url = "https://%s.lightning.force.com/%s/%s.app" % (
instance, self.namespace, app_name
)
self.window.run_command("login_to_sfdc", {"startURL": start_url})
def is_enabled(self):
self.settings = context.get_settings()
metadata = util.get_described_metadata(self.settings)
if not metadata:
return False
self.namespace = metadata["organizationNamespace"]
if not self.namespace:
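            # Orgs without a registered namespace prefix use the default 'c'
            # namespace for Lightning components.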
self.namespace = 'c'
return True
class PreviewThisAppInServer(sublime_plugin.TextCommand):
def run(self, edit):
self.view.window().run_command('preview_lightning_app_in_server', {
"app_name": self.app_name
})
def is_enabled(self):
if not self.view.file_name():
return False
attrs = util.get_file_attributes(self.view.file_name())
if attrs["metadata_folder"] != 'aura' or attrs["extension"] != "app":
return False
self.app_name = attrs["name"]
return True
def is_visible(self):
return self.is_enabled()
class RetrieveLightningFromServer(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(RetrieveLightningFromServer, self).__init__(*args, **kwargs)
def run(self, dirs):
        message = "Are you sure you really want to continue refreshing?"
if sublime.ok_cancel_dialog(message, "Confirm?"):
processor.handle_retrieve_package(
self.types,
self.settings["workspace"],
ignore_package_xml=True
)
def is_visible(self, dirs):
self.settings = context.get_settings()
self.types = {}
if len(dirs) == 0:
return False
for _dir in dirs:
if os.path.isfile(_dir):
continue
base, _name = os.path.split(_dir)
base, _folder = os.path.split(base)
# Check Metadata Type
if _folder not in ["aura", "lwc"]:
continue
# Check Project Name
pn = self.settings["default_project_name"]
if pn not in _dir:
continue
            # Group the selected bundles by their metadata type
            if _folder == 'aura':
                self.types.setdefault("AuraDefinitionBundle", []).append(_name)
            elif _folder == 'lwc':
                self.types.setdefault("LightningComponentBundle", []).append(_name)
# Check whether any aura components are chosen
if not self.types:
return False
return True
class DestructLightningFromServer(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(DestructLightningFromServer, self).__init__(*args, **kwargs)
def run(self, dirs):
_, bundle_name = os.path.split(dirs[0])
        if sublime.ok_cancel_dialog("This will delete %s! Confirm to continue?" % bundle_name):
processor.handle_destructive_files(dirs, ignore_folder=False)
def is_visible(self, dirs):
if len(dirs) == 0:
return False
self.settings = context.get_settings()
for _dir in dirs:
attributes = util.get_file_attributes(_dir)
if attributes["metadata_folder"] not in ["aura", "lwc"]:
return False
if not util.check_enabled(_dir, check_cache=False):
return False
return True
class CreateLightningElement(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(CreateLightningElement, self).__init__(*args, **kwargs)
def run(self, dirs, element=""):
""" element: Component, Controller, Helper, Style, Documentation, Render
"""
# Get template attribute
templates = util.load_templates()
template = templates.get("AuraElement").get(element)
settings = context.get_settings()
templates_path = os.path.join(settings["workspace"],
".templates", template["directory"])
with open(templates_path) as fp:
body = fp.read()
        # JS elements are named differently from the other element types
extension = template["extension"]
element_name = "%s%s%s" % (
self.aura_name,
element if extension == ".js" else "",
extension
)
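        # For a hypothetical bundle named "Hello": a Controller element (a .js
        # template) yields HelloController.js, while a Style element (assuming
        # a .css template extension) yields Hello.css; only .js elements embed
        # the element name in the file name.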
        # Build the full path of the new Aura element file
element_file = os.path.join(self._dir, element_name)
        # If the element file already exists, just open it
if os.path.isfile(element_file):
return self.window.open_file(element_file)
# Create Aura Element file
with open(element_file, "w") as fp:
fp.write(body)
        # If creation succeeded, open the file and refresh the project
self.window.open_file(element_file)
self.window.run_command("refresh_folder_list")
# Deploy Aura to server
self.window.run_command("deploy_lightning_to_server", {
"dirs": [self._dir],
"switch_project": False,
"element": element,
"update_meta": True
})
def is_visible(self, dirs, element=""):
if not dirs or len(dirs) != 1: return False
self._dir = dirs[0]
# Check whether project is the active one
settings = context.get_settings()
if settings["default_project_name"] not in self._dir:
return False
# Check metadata folder
attributes = util.get_file_attributes(self._dir)
if attributes["metadata_folder"] != "aura":
return False
self.aura_name = attributes["name"]
# Check lightning type
lightning_extensions = []
for dirpath, dirnames, filenames in os.walk(self._dir):
for filename in filenames:
extension = filename[filename.find("."):]
lightning_extensions.append(extension)
        # Only Component and Application bundles can have child elements
if ".cmp" in lightning_extensions or ".app" in lightning_extensions:
return True
return False
class CreateLightningDefinition(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(CreateLightningDefinition, self).__init__(*args, **kwargs)
def run(self, _type=""):
self._type = _type
self.window.show_input_panel("Please Input %s Name: " % _type,
"", self.on_input, None, None)
def on_input(self, lightning_name):
        # Validate the user input before creating the bundle locally
if not re.match('^[a-zA-Z]+\\w+$', lightning_name):
message = 'Invalid format, do you want to try again?'
if not sublime.ok_cancel_dialog(message): return
self.window.show_input_panel("Please Input %s Name: " % self._type,
"", self.on_input, None, None)
return
# Get settings
settings = context.get_settings()
workspace = settings["workspace"]
# Get template attribute
templates = util.load_templates()
template = templates.get("Aura").get(self._type)
with open(os.path.join(workspace, ".templates", template["directory"])) as fp:
body = fp.read()
# Build dir for new lightning component
component_dir = os.path.join(workspace, "src", "aura", lightning_name)
if not os.path.exists(component_dir):
os.makedirs(component_dir)
else:
            message = "%s already exists, do you want to try again?" % lightning_name
if not sublime.ok_cancel_dialog(message, "Try Again?"): return
self.window.show_input_panel("Please Input Lightning Name: ",
"", self.on_input, None, None)
return
lightning_file = os.path.join(component_dir, lightning_name + template["extension"])
# Create Aura lightning file
with open(lightning_file, "w") as fp:
fp.write(body)
        # If creation succeeded, open the file and refresh the project
window = sublime.active_window()
window.open_file(lightning_file)
window.run_command("refresh_folder_list")
# Deploy Aura to server
self.window.run_command("deploy_lightning_to_server", {
"dirs": [component_dir],
"switch_project": False,
"element": self._type,
"update_meta": True
})
def is_enabled(self):
return util.check_action_enabled()
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import operator
import pytest
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import DataFrame, Series, Index, MultiIndex, date_range
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.tests.frame.common import TestData
PARSERS = 'python', 'pandas'
ENGINES = 'python', pytest.param('numexpr', marks=td.skip_if_no_ne)
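# Every test that requests the `parser` and `engine` fixtures below runs once
# per parser/engine combination; the numexpr engine is skipped automatically
# when numexpr is not installed (via td.skip_if_no_ne).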
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != 'pandas':
pytest.skip("cannot evaluate with parser {0!r}".format(parser))
class TestCompat(object):
def setup_method(self, method):
self.df = DataFrame({'A': [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query('A>0')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query('A>0', engine=None)
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine=None)
assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query('A>0', engine='python')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='python')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query('A>0', engine='numexpr')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='numexpr')
assert_series_equal(result, self.expected2, check_names=False)
else:
pytest.raises(ImportError,
lambda: df.query('A>0', engine='numexpr'))
pytest.raises(ImportError,
lambda: df.eval('A+1', engine='numexpr'))
class TestDataFrameEval(TestData):
def test_ops(self):
        # test ops and reversed ops in evaluation
# GH7198
# smaller hits python, larger hits numexpr
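        # (pandas only hands evaluation to numexpr once a frame crosses its
        # minimum element-count threshold, so n=4 stays on the python path
        # and n=4000 crosses into numexpr)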
for n in [4, 4000]:
df = DataFrame(1, index=range(n), columns=list('abcd'))
df.iloc[0] = 2
m = df.mean()
for op_str, op, rop in [('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__')]:
base = (DataFrame(np.tile(m.values, n) # noqa
.reshape(n, -1),
columns=list('abcd')))
expected = eval("base{op}df".format(op=op_str))
# ops as strings
result = eval("m{op}df".format(op=op_str))
assert_frame_equal(result, expected)
                # these are commutative (op holds the dunder name, so compare
                # against the operator symbol in op_str)
                if op_str in ['+', '*']:
                    result = getattr(df, op)(m)
                    assert_frame_equal(result, expected)
                # these are not
                elif op_str in ['-', '/']:
                    result = getattr(df, rop)(m)
                    assert_frame_equal(result, expected)
# GH7192
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = (1 - np.isnan(df.iloc[0:25]))
result = (1 - np.isnan(df)).iloc[0:25]
assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'b']})
msg = "expr must be a string to be evaluated"
with tm.assert_raises_regex(ValueError, msg):
df.query(lambda x: x.B == "b")
with tm.assert_raises_regex(ValueError, msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({'A': [1, 2, 3]})
msg = "expr cannot be an empty string"
with tm.assert_raises_regex(ValueError, msg):
df.query('')
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(randn(10, 2), columns=list('ab'))
dict1 = {'a': 1}
dict2 = {'b': 2}
assert (df.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
assert (pd.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
class TestDataFrameQueryWithMultiIndex(object):
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b], names=['color', 'food'])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values('color').values, index=index,
name='color')
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser,
engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser,
engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_partially_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, 'rating']
df = DataFrame(randn(10, 2), index=index)
res = df.query('rating == 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind == 1]
assert_frame_equal(res, exp)
res = df.query('rating != 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind != 1]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
assert_frame_equal(res, exp)
def test_query_multiindex_get_index_resolvers(self):
df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {'index': df.index,
'columns': col_series,
'spam': to_series(df.index, 'spam'),
'eggs': to_series(df.index, 'eggs'),
'C0': col_series}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_raise_on_panel_with_multiindex(self, parser, engine):
p = tm.makePanel(7)
p.items = tm.makeCustomIndex(len(p.items), nlevels=2)
with pytest.raises(NotImplementedError):
pd.eval('p + 1', parser=parser, engine=engine)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPandas(object):
@classmethod
def setup_class(cls):
cls.engine = 'numexpr'
cls.parser = 'pandas'
@classmethod
def teardown_class(cls):
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d['dates1'] = date_range('1/1/2012', periods=n)
d['dates3'] = date_range('1/1/2014', periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index.to_series() < '20130101') &
('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame({'dates': date_range('1/1/2012', periods=n),
'nondate': np.arange(n)})
result = df.query('dates == nondate', parser=parser, engine=engine)
assert len(result) == 0
result = df.query('dates != nondate', parser=parser, engine=engine)
assert_frame_equal(result, df)
for op in ['<', '>', '<=', '>=']:
with pytest.raises(TypeError):
df.query('dates %s nondate' % op, parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": lrange(10), "+": lrange(3, 13),
"r": lrange(4, 14)})
with pytest.raises(SyntaxError):
df.query('i - +', engine=engine, parser=parser)
def test_query_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list('ab'))
a, b = 1, 2 # noqa
res = df.query('a > b', engine=engine, parser=parser)
expected = df[df.a > df.b]
assert_frame_equal(res, expected)
res = df.query('@a > b', engine=engine, parser=parser)
expected = df[a > df.b]
assert_frame_equal(res, expected)
# no local variable c
with pytest.raises(UndefinedVariableError):
df.query('@a > b > @c', engine=engine, parser=parser)
# no column named 'c'
with pytest.raises(UndefinedVariableError):
df.query('@a > b > c', engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
# we don't pick up the local 'sin'
with pytest.raises(UndefinedVariableError):
df.query('sin > 5', engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.core.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
with tm.assert_raises_regex(NumExprClobberingError,
'Variables in expression.+'):
df.query('sin > 5', engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])
assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
df[df.a < df.b])
assert_frame_equal(df.query('a + b > b * c', engine=engine,
parser=parser),
df[df.a + df.b > df.b * df.c])
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=Index(range(10), name='blob'),
columns=['a', 'b', 'c'])
res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
assert_frame_equal(res, expec)
res = df.query('blob < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=range(10), columns=['a', 'b', 'c'])
# "index" should refer to the index
res = df.query('index < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
# test against a scalar
res = df.query('index < 5', engine=engine, parser=parser)
expec = df[df.index < 5]
assert_frame_equal(res, expec)
def test_nested_scope(self):
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query('(@df > 0) & (@df2 > 0)', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
assert_frame_equal(result, expected)
result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.core.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
with pytest.raises(UndefinedVariableError):
df.query('df > 0', engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
b = 1
expect = df[df.a < b]
result = df.query('a < @b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query('a < b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list('abc')
df = DataFrame(randn(100, len(cols)), columns=cols)
res = df.query('a < b < c and a not in b not in c', engine=engine,
parser=parser)
ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b) # noqa
expec = df[ind]
assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name='a')
b = Series(np.random.randint(10, size=15), name='b')
df = DataFrame({'a': a, 'b': b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query('b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name='b')
expected = df.loc[(b - 1).isin(a)]
result = df.query('@b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1 # noqa
df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
with tm.assert_raises_regex(UndefinedVariableError,
"local variable 'c' is not defined"):
df.query('a == @c', engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1 # noqa
a = np.r_[20:101:20]
df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
df.index.name = 'index'
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df[df['index'] > 5]
assert_frame_equal(result, expected)
df = DataFrame({'index': a,
'b': np.random.randn(a.size)})
result = df.query('ilevel_0 > 5', engine=self.engine,
parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
df.index.name = 'a'
result = df.query('a > 5', engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
assert_frame_equal(result, expected)
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
        df.loc[::2, 'a'] = np.inf
ops = '==', '!='
d = dict(zip(ops, (operator.eq, operator.ne)))
for op, f in d.items():
q = 'a %s inf' % op
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
assert_frame_equal(result, expected)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryNumExprPython, cls).setup_class()
cls.engine = 'numexpr'
cls.parser = 'python'
cls.frame = TestData().frame
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
with pytest.raises(NotImplementedError):
df.query('index < 20130101 < dates3', engine=engine, parser=parser)
def test_nested_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine = self.engine
parser = self.parser
# smoke test
x = 1 # noqa
result = pd.eval('x + 1', engine=engine, parser=parser)
assert result == 2
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
with pytest.raises(SyntaxError):
df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
with pytest.raises(UndefinedVariableError):
df.query('(df>0) & (df2>0)', engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
parser=parser)
assert_frame_equal(expected, result)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPandas, cls).setup_class()
cls.engine = 'python'
cls.parser = 'pandas'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPython, cls).setup_class()
cls.engine = cls.parser = 'python'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryStrings(object):
def test_str_query_method(self, parser, engine):
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings == 'a']
if parser != 'pandas':
col = 'strings'
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
pytest.raises(NotImplementedError, df.query, ex,
engine=engine, parser=parser,
local_dict={'strings': df.strings})
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[df.strings.isin(['a'])])
expect = df[df.strings != 'a']
res = df.query('strings != "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[~df.strings.isin(['a'])])
def test_str_list_query_method(self, parser, engine):
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings.isin(['a', 'b'])]
if parser != 'pandas':
col = 'strings'
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
with pytest.raises(NotImplementedError):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
expect = df[~df.strings.isin(['a', 'b'])]
res = df.query('strings != ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
def test_query_with_string_columns(self, parser, engine):
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
if parser == 'pandas':
res = df.query('a in b', parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
assert_frame_equal(res, expec)
res = df.query('a in b and c < d', parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
assert_frame_equal(res, expec)
else:
with pytest.raises(NotImplementedError):
df.query('a in b', parser=parser, engine=engine)
with pytest.raises(NotImplementedError):
df.query('a in b and c < d', parser=parser, engine=engine)
def test_object_array_eq_ne(self, parser, engine):
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
res = df.query('a == b', parser=parser, engine=engine)
exp = df[df.a == df.b]
assert_frame_equal(res, exp)
res = df.query('a != b', parser=parser, engine=engine)
exp = df[df.a != df.b]
assert_frame_equal(res, exp)
def test_query_with_nested_strings(self, parser, engine):
skip_if_no_pandas_parser(parser)
raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python',
parse_dates=['timestamp'])
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser,
engine=engine)
assert_frame_equal(expected, res)
def test_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = DataFrame({'a': ['a', 'b', 'test & test'],
'b': [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == 'test & test']
assert_frame_equal(res, expec)
def test_query_lex_compare_strings(self, parser, engine):
import operator as opr
a = Series(np.random.choice(list('abcde'), 20))
b = Series(np.arange(a.size))
df = DataFrame({'X': a, 'Y': b})
ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}
for op, func in ops.items():
res = df.query('X %s "d"' % op, engine=engine, parser=parser)
expected = df[func(df.X, 'd')]
assert_frame_equal(res, expected)
def test_query_single_element_booleans(self, parser, engine):
columns = 'bid', 'bidsize', 'ask', 'asksize'
data = np.random.randint(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query('bid & ask', engine=engine, parser=parser)
expected = df[df.bid & df.ask]
assert_frame_equal(res, expected)
def test_query_string_scalar_variable(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
'Price': [109.70, 109.72, 183.30, 183.35]})
e = df[df.Symbol == 'BUD US']
symb = 'BUD US' # noqa
r = df.query('Symbol == @symb', parser=parser, engine=engine)
assert_frame_equal(e, r)
class TestDataFrameEvalWithFrame(object):
def setup_method(self, method):
self.frame = DataFrame(randn(10, 3), columns=list('abc'))
def teardown_method(self, method):
del self.frame
def test_simple_expr(self, parser, engine):
res = self.frame.eval('a + b', engine=engine, parser=parser)
expect = self.frame.a + self.frame.b
assert_series_equal(res, expect)
def test_bool_arith_expr(self, parser, engine):
res = self.frame.eval('a[a < 1] + b', engine=engine, parser=parser)
expect = self.frame.a[self.frame.a < 1] + self.frame.b
assert_series_equal(res, expect)
@pytest.mark.parametrize('op', ['+', '-', '*', '/'])
def test_invalid_type_for_operator_raises(self, parser, engine, op):
df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
with tm.assert_raises_regex(TypeError,
r"unsupported operand type\(s\) "
"for .+: '.+' and '.+'"):
df.eval('a {0} b'.format(op), engine=engine, parser=parser)
|
|
#!/usr/bin/env python
import os
import sys
import traceback
import time
###
import dbcon
import matchstate as ms
import proxy
import sandbox
import shorten
import rgkit.game
import tools
S_MATCH_REST = 2.0
TIME_RATE = 0.1
WIN_RATE = 0.05
SYMMETRIC = True
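# TIME_RATE and WIN_RATE are smoothing factors for the exponential moving
# averages of per-bot CPU time and win rate maintained in update_stats().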
def get_cpu_time(pid):
    clk_tck = float(os.sysconf(os.sysconf_names['SC_CLK_TCK']))
    with open("/proc/%d/stat" % (pid,)) as fpath:
        vals = fpath.read().split(' ')
        # Fields 14 and 15 of /proc/<pid>/stat are utime and stime, measured
        # in clock ticks.
        cpu_time = sum(
            int(f) / clk_tck for f in vals[13:15])
        return cpu_time
def calc_score(scores):
if scores[0] == scores[1]:
return 0.5
return 1 if scores[0] > scores[1] else 0
def update_ratings(db, match, game_result):
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
# WEEK = 7 * DAY
# MONTH = 30 * DAY
def get_k_factor(r1_rating, r2_rating, r1_update, r2_update):
k_factor = min(tools.get_k_factor(r1_rating),
tools.get_k_factor(r2_rating))
# Increase k_factor for recently updated bots.
if (time.time() - r1_update < DAY or
time.time() - r2_update < DAY):
k_factor = int(k_factor * 2)
elif (time.time() - r1_update < 3 * DAY or
time.time() - r2_update < 3 * DAY):
k_factor = int(k_factor * 1.5)
return k_factor
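    # Standard Elo update: the expected score of r1 against r2 is
    # 1 / (1 + 10 ** ((r2 - r1) / 400)), and the rating then moves by
    # k_factor * (actual_result - expected).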
def new_rating(r1, r2, result, k_factor):
expected = 1.0 / (1 + pow(10.0, (r2 - r1) / 400.0))
return r1 + k_factor * (result - expected)
def get_rating_and_update_time(rid):
result = db.select(
'robots',
what='rating, last_updated',
where='id=$id',
vars={
'id': rid})
if not result:
return None, None
robot = result[0]
rating = robot['rating']
last_updated = robot['last_updated']
if rating is None:
return tools.DEFAULT_RATING, last_updated
return rating, last_updated
def get_ranking(rating):
query = '''
select count(*) as ranking
from robots r
where compiled and passed and not disabled and
r.rating > $rating + 1e-5
'''
robot = db.query(query, vars={'rating': rating})
return robot[0]['ranking']
rating1, updated1 = get_rating_and_update_time(match['r1_id'])
rating2, updated2 = get_rating_and_update_time(match['r2_id'])
k_factor = get_k_factor(rating1, rating2, updated1, updated2)
new_rating1 = new_rating(rating1, rating2, game_result, k_factor)
new_rating2 = new_rating(rating2, rating1, 1 - game_result, k_factor)
# ratings might have changed since the match was created
ranking1 = get_ranking(rating1)
ranking2 = get_ranking(rating2)
db.update('matches', where='id=$id', vars={'id': match['id']},
r1_rating=rating1, r2_rating=rating2,
r1_ranking=ranking1, r2_ranking=ranking2,
k_factor=k_factor)
db.update('robots', where='id=$id', vars={'id': match['r1_id']},
rating=new_rating1, last_opponent=match['r2_id'],
last_match=int(time.time()))
db.update('robots', where='id=$id', vars={'id': match['r2_id']},
rating=new_rating2, last_opponent=match['r1_id'],
last_match=int(time.time()))
def update_stats(db, match, r1_time, r2_time, score):
if r1_time is not None:
db.query('UPDATE robots SET time=time*(1-$r) + $t*$r WHERE id=$id',
vars={'id': match['r1_id'], 'r': TIME_RATE, 't': r1_time})
if r2_time is not None:
db.query('UPDATE robots SET time=time*(1-$r) + $t*$r WHERE id=$id',
vars={'id': match['r2_id'], 'r': TIME_RATE, 't': r2_time})
db.query('UPDATE robots SET winrate=winrate*(1-$r) + $t*$r WHERE id=$id',
vars={'id': match['r1_id'], 'r': WIN_RATE, 't': score})
db.query('UPDATE robots SET winrate=winrate*(1-$r) + $t*$r WHERE id=$id',
vars={'id': match['r2_id'], 'r': WIN_RATE, 't': 1 - score})
def run_game(db, match, output_file):
proxy_process1, proxy_process2 = None, None
try:
# TODO: Fix load_map, seriously.
sandbox.load_map()
output_file.write('---Starting Robot 1---\n')
proxy_process1, p1 = proxy.make_player(match['r1_code'], output_file)
if p1 is None:
db.update('robots', passed=False,
where='id=$id', vars={'id': match['r1_id']})
raise Exception('Robot 1 not able to be instantiated.')
output_file.write('---Starting Robot 2---\n')
proxy_process2, p2 = proxy.make_player(match['r2_code'], output_file)
if p2 is None:
db.update('robots', passed=False,
where='id=$id', vars={'id': match['r2_id']})
raise Exception('Robot 2 not able to be instantiated.')
g = rgkit.game.Game([p1,
p2],
record_actions=False,
record_history=True,
print_info=True,
seed=match['seed'],
symmetric=SYMMETRIC)
g.run_all_turns()
game_scores = g.get_scores()
r1_score, r2_score = game_scores
score = calc_score(game_scores)
history = g.history
match_data = shorten.dumps({'history': history, 'score': game_scores})
winner = {1: match['r1_id'], 0: match['r2_id'], 0.5: 0}[score]
output_file.write('---Time Taken---\n')
r1_time = None
r2_time = None
try:
r1_time = get_cpu_time(proxy_process1.pid)
r2_time = get_cpu_time(proxy_process2.pid)
output_file.write('R1: {0}\nR2: {1}\n'.format(r1_time, r2_time))
except Exception:
traceback.print_exc(file=output_file)
# turn off printing here because the output for data is huge
old_print = db.printing
db.printing = False
db.insert(
'history',
match_id=match['id'], data=match_data, timestamp=int(time.time()))
db.update(
'matches',
where='id=$id', vars={'id': match['id']},
winner=winner, state=ms.DONE,
r1_score=r1_score, r2_score=r2_score,
r1_time=r1_time, r2_time=r2_time,
timestamp=int(time.time()))
db.printing = old_print
if not proxy_process1.alive():
output_file.write('Robot 1 died.\n')
if not proxy_process2.alive():
output_file.write('Robot 2 died.\n')
return score, r1_time, r2_time
finally:
if proxy_process1 is not None:
proxy_process1.cleanup()
if proxy_process2 is not None:
proxy_process2.cleanup()
def run_match(db, match):
sys.stdout.flush()
sys.stderr.flush()
with open('/matchlog/%d' % match['id'], 'w+') as f:
try:
sys.stdout = sys.stderr = f
db.update('matches', where='id=$id', vars={'id': match['id']},
state=ms.RUNNING)
score, r1_time, r2_time = run_game(db, match, f)
if match['ranked']:
update_ratings(db, match, score)
update_stats(db, match, r1_time, r2_time, score)
except Exception:
traceback.print_exc(file=f)
db.update('matches', where='id=$id', state=ms.ERROR,
vars={'id': match['id']})
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
time.sleep(S_MATCH_REST)
def get_match(db, mid):
query = '''
select
matches.*,
r1.compiled_code as r1_code, r2.compiled_code as r2_code,
r1.rating as r1_rating, r2.rating as r2_rating,
r1.name as r1_name, r2.name as r2_name
from matches
join robots r1 on r1.id = matches.r1_id
join robots r2 on r2.id = matches.r2_id
where matches.id = $id'''
match = db.query(query, vars={'id': mid})
return match[0] if match else None
if __name__ == '__main__':
db = dbcon.connect_db()
if len(sys.argv) > 1:
match = get_match(db, int(sys.argv[1]))
run_game(db, match, sys.stdout)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multiple virtual GPU support."""
import random
import numpy as np
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class VirtualGpuTestUtil(object):
def __init__(self,
dim=1000,
num_ops=100,
virtual_devices_per_gpu=None,
device_probabilities=None):
self._dim = dim
self._num_ops = num_ops
if virtual_devices_per_gpu is None:
self._virtual_devices_per_gpu = [3]
else:
self._virtual_devices_per_gpu = virtual_devices_per_gpu
    self._visible_device_list = list(
        range(len(self._virtual_devices_per_gpu)))
gpu_devices = [
('/gpu:' + str(i)) for i in range(sum(self._virtual_devices_per_gpu))
]
self.devices = ['/cpu:0'] + gpu_devices
self._num_devices = len(self.devices)
# Each virtual device gets 2GB memory.
self._mem_limits_mb = [
([1 << 11] * i) for i in self._virtual_devices_per_gpu
]
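    # Example: with the default virtual_devices_per_gpu=[3] this evaluates to
    # [[2048, 2048, 2048]], i.e. three 2 GB virtual devices carved out of the
    # single visible physical GPU.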
self.config = self._GetSessionConfig()
if device_probabilities is not None:
self._device_probabilities = list(device_probabilities) # Deep copy
for i in range(1, self._num_devices):
self._device_probabilities[i] += self._device_probabilities[i - 1]
else:
      # Each device gets the same probability of being assigned an operation.
step = 1.0 / self._num_devices
self._device_probabilities = [
(x + 1) * step for x in range(self._num_devices)
]
    # To prevent rounding errors from causing problems.
self._device_probabilities[self._num_devices - 1] = 1.1
logging.info('dim: %d', self._dim)
logging.info('num_ops: %d', self._num_ops)
logging.info('visible_device_list: %s', str(self._visible_device_list))
logging.info('virtual_devices_per_gpu: %s',
str(self._virtual_devices_per_gpu))
logging.info('mem_limits: %s', str(self._mem_limits_mb))
logging.info('devices: %s', str(self.devices))
logging.info('config: %s', text_format.MessageToString(self.config))
logging.info('device_probabilities: %s', str(self._device_probabilities))
# Creates virtual GPU devices
def _GetSessionConfig(self):
virtual_device_gpu_options = config_pb2.GPUOptions(
visible_device_list=','.join(str(d) for d in self._visible_device_list),
experimental=config_pb2.GPUOptions.Experimental(virtual_devices=[
config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=i) for i in self._mem_limits_mb
]))
return config_pb2.ConfigProto(gpu_options=virtual_device_gpu_options)
  # Generates a list of 3-tuples; each tuple holds the two source device indices
  # and the destination device index for a binary operation like 'add':
  #   (src_device_1, src_device_2, dst_device)
  # A standalone sketch of the cumulative-probability sampling used here appears
  # after this class.
def _GenerateOperationPlacement(self):
result = []
for unused_i in range(self._num_ops):
op_device = ()
for unused_j in range(3):
random_num = random.random()
for device_index in range(self._num_devices):
if self._device_probabilities[device_index] > random_num:
op_device += (device_index,)
break
result.append(op_device)
return result
# Logs part of the matrix for debugging purposes.
def _LogMatrix(self, mat, dim):
logging.info('---- printing the first 10*10 submatrix ----')
for i in range(min(10, dim)):
row = ''
for j in range(min(10, dim)):
row += ' ' + str(mat[i][j])
logging.info(row)
# Runs a list of 'add' operations where each operation satisfies the device
# placement constraints in `op_placement`, and returns the result.
def _TestRandomGraphWithDevices(self,
sess,
seed,
op_placement,
devices,
debug_mode=False):
data = []
shape = (self._dim, self._dim)
feed_dict = {}
# Initialize the matrices
for i in range(len(devices)):
with ops.device(devices[i]):
var = array_ops.placeholder(dtypes.float32, shape=shape)
np.random.seed(seed + i)
feed_dict[var] = np.random.uniform(
low=0, high=0.1, size=shape).astype(np.float32)
data.append(var)
# Run the 'add' operations on those matrices
for op in op_placement:
with ops.device(devices[op[2]]):
data[op[2]] = math_ops.add(data[op[0]], data[op[1]])
with ops.device('/cpu:0'):
s = data[0]
for i in range(1, len(data)):
s = math_ops.add(s, data[i])
if debug_mode:
logging.info(ops.get_default_graph().as_graph_def())
result = sess.run(s, feed_dict=feed_dict)
self._LogMatrix(result, self._dim)
return result
  # Generates a random graph with `self._num_ops` 'add' operations, each placed
  # on a different virtual device, and tests that the result is identical to the
  # result obtained by running the same graph on the CPU only.
def TestRandomGraph(self, sess, op_placement=None, random_seed=None):
debug_mode = False
if op_placement is None:
op_placement = self._GenerateOperationPlacement()
else:
debug_mode = True
if random_seed is None:
random_seed = random.randint(0, 1 << 31)
else:
debug_mode = True
logging.info('Virtual gpu functional test for random graph...')
logging.info('operation placement: %s', str(op_placement))
logging.info('random seed: %d', random_seed)
# Run with multiple virtual gpus.
result_vgd = self._TestRandomGraphWithDevices(
sess, random_seed, op_placement, self.devices, debug_mode=debug_mode)
# Run with single cpu.
result_cpu = self._TestRandomGraphWithDevices(
sess,
random_seed,
op_placement, ['/cpu:0'] * self._num_devices,
debug_mode=debug_mode)
# Test the result
for i in range(self._dim):
for j in range(self._dim):
if result_vgd[i][j] != result_cpu[i][j]:
logging.error(
'Result mismatch at row %d column %d: expected %f, actual %f', i,
j, result_cpu[i][j], result_vgd[i][j])
logging.error('Devices: %s', self.devices)
logging.error('Memory limits (in MB): %s', self._mem_limits_mb)
return False
return True
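# A standalone sketch (not used by the tests above) of the cumulative-probability
# sampling that _GenerateOperationPlacement relies on: build a running sum of the
# per-device probabilities and pick the first device whose cumulative value
# exceeds a uniform random draw. The probabilities and helper names below are
# illustrative only.
def _sample_device_index(cumulative_probabilities):
  """Return the index of the first cumulative bucket exceeding a random draw."""
  draw = random.random()
  for index, bound in enumerate(cumulative_probabilities):
    if bound > draw:
      return index
  # Guard against floating-point rounding, mirroring the 1.1 sentinel above.
  return len(cumulative_probabilities) - 1
def _example_placement(num_ops=5):
  """Build `num_ops` (src1, src2, dst) tuples over 4 hypothetical devices."""
  cumulative = [0.25, 0.5, 0.75, 1.1]  # uniform over 4 devices, padded sentinel
  return [tuple(_sample_device_index(cumulative) for _ in range(3))
          for _ in range(num_ops)]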
class VirtualGpuTest(test_util.TensorFlowTestCase):
def __init__(self, method_name):
super(VirtualGpuTest, self).__init__(method_name)
self._util = VirtualGpuTestUtil()
@test_util.deprecated_graph_mode_only
def testStatsContainAllDeviceNames(self):
with self.session(config=self._util.config) as sess:
      # TODO(laigd): b/70811538. The is_gpu_available() call will invoke
      # DeviceFactory::AddDevices() with a default SessionOption, which prevents
      # adding virtual devices in the future, so it must be called within the
      # context of a session in which virtual devices are created. Same in
      # the following test case.
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
mat_shape = [10, 10]
data = []
for d in self._util.devices:
with ops.device(d):
var = variables.Variable(random_ops.random_uniform(mat_shape))
self.evaluate(var.initializer)
data.append(var)
s = data[0]
for i in range(1, len(data)):
s = math_ops.add(s, data[i])
sess.run(s, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:1' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:2' in devices)
@test_util.deprecated_graph_mode_only
def testLargeRandomGraph(self):
with self.session(config=self._util.config) as sess:
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
for _ in range(5):
if not self._util.TestRandomGraph(sess):
return
if __name__ == '__main__':
test.main()
|
|
import click
import csv
from sklearn import tree
from sklearn import ensemble
try: # later versions of sklearn move the scoring tools
from sklearn import model_selection as mscv
except ImportError:
from sklearn import cross_validation as mscv
class Contig(object): # to store contigs for analysis
    def __init__(self, contigid, variables=None, classification=False):
        self.contigid = contigid
        # Use None as the default to avoid sharing one mutable dict between instances.
        self.variables = variables if variables is not None else {}
        self.classification = classification
def parseTaxdump(blastdb, createDict):
"""
Parses a local copy of the NCBI Taxonomy dump for use by later functions.
Args:
blastdb: A string containing the root directory for the NCBI taxdump.
createDict: True to create taxon dictionary for runfile mode
Returns:
taxdump: A dictionary mapping NCBI taxon ids to a tuple containing the
taxid of the parent taxon and the name of the current taxon.
taxdict: A dictionary mapping human readable names to taxids
"""
taxdump = {}
taxidDict = {}
with open(blastdb + "/names.dmp") as names:
click.echo("Reading names.dmp")
namesReader = list(csv.reader(names, delimiter="|"))
with click.progressbar(namesReader) as nr: # human readable names
for line in nr:
if "scientific name" in line[3].strip():
taxdump[line[0].strip()] = line[1].strip() # {taxid: Name}
if createDict:
taxidDict[line[1].strip()] = line[0].strip() # {Name: taxid}
with open(blastdb + "/nodes.dmp") as nodes:
click.echo("Reading nodes.dmp")
nodesReader = list(csv.reader(nodes, delimiter="|"))
with click.progressbar(nodesReader) as nr: # connection between taxids
for line in nr:
taxdump[line[0].strip()] = [taxdump[line[0].strip()], line[1].strip(), line[2].strip()] # {taxid: [Name, Parent, Rank]}
with open(blastdb + "/merged.dmp") as merged: # merged taxids are the same organism, effectively
click.echo("Reading merged.dmp")
mergedReader = list(csv.reader(merged, delimiter="|"))
with click.progressbar(mergedReader) as mr:
for line in mr:
taxdump[line[0].strip()] = ["merged", line[1].strip()]
with open(blastdb + "/delnodes.dmp") as delnodes: # if a taxid is deleted than the BLAST DB and taxdump are too far out of sync
click.echo("Reading delnodes.dmp")
delReader = list(csv.reader(delnodes, delimiter="|"))
with click.progressbar(delReader) as dr:
for line in dr:
taxdump[line[0].strip()] = ["deleted"]
return taxdump, taxidDict
def taxidToLineage(taxid, taxdump, classificationLevel):
"""
Recursively converts an NCBI taxid to a full phylogenetic lineage.
Args:
taxid: The taxid to look up.
taxdump: The NCBI taxonomy dump as parsed by parseTaxDump
classificationLevel: The level of classification to save into the corpus.
Returns:
classification: A string containing the lineage at the chosen classification level.
"""
try:
taxid = taxid.split(";")[0]
tax = taxdump[taxid]
if "deleted" in tax: # taxid has been removed from NCBI db, indicates that taxdump and blastdb are out of sync
raise Exception("ERROR: Taxon id %s has been deleted from the NCBI DB.\nPlease update your databases and re-run BLAST." % taxid)
if "merged" in tax:
merge = tax[1]
return taxidToLineage(merge, taxdump, classificationLevel)
if tax[2].lower() == classificationLevel.lower(): # final answer
return tax[0]
elif taxid != '1': # recursive call
return taxidToLineage(tax[1], taxdump, classificationLevel)
elif taxid == '1': # nohit
return("nohit")
else: # edge case
raise Exception("Taxid %s has failed to resolve." % taxid)
except KeyError:
raise Exception("Taxon id %s was not found in the NCBI DB.\nPlease update your DB and try again." % taxid)
def constructCorpus(contigs, classMap, binary, target):
"""
Construct a corpus, or body of training data for the decision tree, as well as the data under test.
Args:
contigs: A list of sidr.common.Contig objects with test variables.
classMap: A dictionary mapping class names to their class id used by scikit-learn.
binary: Set True to use "binary" (target/nontarget) classification for the model.
target: The name of the target classification.
Returns:
corpus: A list of lists, containing the GC content, coverage, and class number.
testdata: Data that was not classified by BLAST and will later be classified by the
trained model.
features: List of variables used by each contig.
"""
corpus = []
testdata = []
features = []
for contig in contigs:
variableBuf = list(contig.variables.values())
features = list(contig.variables.keys())
if contig.classification:
if binary:
if contig.classification.lower() == target.lower():
variableBuf.append("target")
corpus.append(variableBuf)
else:
variableBuf.append("nontarget")
corpus.append(variableBuf)
else:
variableBuf.append(contig.classification)
corpus.append(variableBuf)
else:
variableBuf.insert(0, contig.contigid)
testdata.append(variableBuf)
return corpus, testdata, features
def constructModel(corpus, classList, features, modelOutput):
"""
Trains a Decision Tree model on the test corpus.
Args:
corpus: A list of lists, containing the GC content, coverage, and class number.
classList: A list of class names.
features: List of variables used by each contig.
modelOutput: Location to save model as GraphViz DOT, or False to save no model.
Returns:
classifier: A DecisionTreeClassifier object that has been trained on the test corpus.
"""
corpus.sort() # just in case
X = []
Y = []
for item in corpus:
X.append(item[:-1]) # all but the last item
Y.append(item[-1]) # only the last item
X_train, X_test, Y_train, Y_test = mscv.train_test_split(X, Y, test_size=0.3, random_state=0)
# TODO: implement classifier testing and comparison, now only baggingClassifier is used as per paper
#treeClassifier = tree.DecisionTreeClassifier()
#treeClassifier = treeClassifier.fit(X_train, Y_train)
#click.echo("Decision tree classifier built, score is %s out of 1.00" % treeClassifier.score(X_test, Y_test))
baggingClassifier = ensemble.BaggingClassifier()
baggingClassifier = baggingClassifier.fit(X_train, Y_train)
click.echo("Bagging classifier built, score is %s out of 1.00" % baggingClassifier.score(X_test, Y_test))
#forestClassifier = ensemble.RandomForestClassifier(n_estimators=10)
#forestClassifier = forestClassifier.fit(X_train, Y_train)
#click.echo("Random forest classifier built, score is %s out of 1.00" % forestClassifier.score(X_test, Y_test))
#adaClassifier = ensemble.AdaBoostClassifier(n_estimators=100)
#adaClassifier = adaClassifier.fit(X_train, Y_train)
#click.echo("AdaBoost classifier built, score is %s out of 1.00" % adaClassifier.score(X_test, Y_test))
#gradientClassifier = ensemble.GradientBoostingClassifier(n_estimators=100)
#gradientClassifier = gradientClassifier.fit(X_train, Y_train)
#click.echo("Gradient tree boosting classifier built, score is %s out of 1.00" % gradientClassifier.score(X_test, Y_test))
    if modelOutput:
        with open(modelOutput, 'w') as dotfile:
            # export_graphviz renders a single decision tree, so export the first
            # fitted base estimator of the bagging ensemble rather than the ensemble itself.
            tree.export_graphviz(baggingClassifier.estimators_[0], out_file=dotfile, feature_names=features,
                                 class_names=classList, filled=True, rounded=True, special_characters=True)
return baggingClassifier
def classifyData(classifier, testdata, classMap):
"""
Classifies data based on the previously trained model.
Args:
classifier: A classifier object that has been trained on the test corpus.
testdata: A dataset to classify based on reads.
classMap: A dictionary mapping class names to their class id used by scikit-learn.
Returns:
result: Test data as classified by the model.
"""
testdata.sort()
X = []
# testdata = [[contigID, variable1, variable2, ...], ...]
for item in testdata:
X.append(item[1::]) # all but the first item
Y = classifier.predict(X)
# Step one: transpose the testdata matrix and extract all contigIDs
contigIDs = list(zip(*testdata))[0] # https://stackoverflow.com/questions/4937491/matrix-transpose-in-python
# Step two: combine the contigIDs with the results from the classifier
result = list(zip(contigIDs, Y))
return result
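# A small end-to-end sketch (not part of the sidr pipeline itself) showing how
# constructCorpus, constructModel and classifyData fit together. The Contig
# objects, variable names and class labels below are fabricated for illustration;
# real runs build contigs from BLAST and alignment data.
def _example_classification_run():
    labelled = [
        Contig('c%d' % i, {'GC': 0.30 + 0.01 * i, 'Coverage': 20.0 + i},
               classification='Nematoda')
        for i in range(6)
    ]
    contaminant = [
        Contig('c%d' % (i + 6), {'GC': 0.60 + 0.01 * i, 'Coverage': 80.0 + i},
               classification='Proteobacteria')
        for i in range(6)
    ]
    unknown = [Contig('u1', {'GC': 0.31, 'Coverage': 22.0}),
               Contig('u2', {'GC': 0.63, 'Coverage': 85.0})]
    contigs = labelled + contaminant + unknown
    corpus, testdata, features = constructCorpus(contigs, classMap=None,
                                                 binary=False, target='Nematoda')
    classifier = constructModel(corpus, ['Nematoda', 'Proteobacteria'],
                                features, modelOutput=False)
    # Returns [(contigid, predicted_class), ...] for the unlabelled contigs.
    return classifyData(classifier, testdata, classMap=None)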
def generateOutput(tokeep, toremove, result, contigs, target, output):
"""
Generates output files for completed runs.
Args:
tokeep: File-like object to output target contigs to.
toremove: File-like object to output non-target contigs to.
result: Classified data from sidr.common.classifyData().
contigs: List of sidr.common.Contig objects from input.
target: Target classification.
output: File-like object to save complete results to.
"""
targetContigIDs = []
nontargetContigIDs = []
outputLines = []
for contig in contigs:
if contig.classification:
if target.lower() == contig.classification.lower():
targetContigIDs.append(contig.contigid)
else:
nontargetContigIDs.append(contig.contigid)
outputLines.append([contig.contigid, contig.classification, "input"])
    for contig in result:
        # The classifier emits either the real class name (multiclass mode) or
        # the literal "target"/"nontarget" labels (binary mode).
        if contig[1] in (target, "target"):
            targetContigIDs.append(contig[0])
        else:
            nontargetContigIDs.append(contig[0])
        outputLines.append([contig[0], contig[1], "dt"])
with open(output, "w+") as f:
f.write("contigid, classification, source\n")
for ln in outputLines:
f.write("%s, %s, %s\n" % (ln[0], ln[1], ln[2])) # https://stackoverflow.com/questions/899103/writing-a-list-to-a-file-with-python for %s\n suggestion
if tokeep:
with open(tokeep, "w+") as f:
for i in targetContigIDs:
f.write("%s\n" % i )
if toremove:
with open(toremove, "w+") as f:
for i in nontargetContigIDs:
f.write("%s\n" % i)
|
|
# flake8: noqa I201
from Child import Child
from Node import Node
DECL_NODES = [
# type-assignment -> '=' type
Node('TypeInitializerClause', kind='Syntax',
children=[
Child('Equal', kind='EqualToken'),
Child('Value', kind='Type'),
]),
# typealias-declaration -> attributes? access-level-modifier? 'typealias'
# typealias-name generic-parameter-clause?
# type-assignment
# typealias-name -> identifier
Node('TypealiasDecl', kind='Decl', traits=['IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('TypealiasKeyword', kind='TypealiasToken'),
Child('Identifier', kind='IdentifierToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Initializer', kind='TypeInitializerClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
]),
# associatedtype-declaration -> attributes? access-level-modifier?
# 'associatedtype' associatedtype-name
# inheritance-clause? type-assignment?
# generic-where-clause?
# associatedtype-name -> identifier
Node('AssociatedtypeDecl', kind='Decl', traits=['IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('AssociatedtypeKeyword', kind='AssociatedtypeToken'),
Child('Identifier', kind='IdentifierToken'),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('Initializer', kind='TypeInitializerClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
]),
Node('FunctionParameterList', kind='SyntaxCollection',
element='FunctionParameter'),
Node('ParameterClause', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('ParameterList', kind='FunctionParameterList'),
Child('RightParen', kind='RightParenToken'),
]),
# -> Type
Node('ReturnClause', kind='Syntax',
children=[
Child('Arrow', kind='ArrowToken'),
Child('ReturnType', kind='Type'),
]),
# function-signature ->
# '(' parameter-list? ')' (throws | rethrows)? '->'? type?
Node('FunctionSignature', kind='Syntax',
children=[
Child('Input', kind='ParameterClause'),
Child('ThrowsOrRethrowsKeyword', kind='Token',
is_optional=True,
token_choices=[
'ThrowsToken',
'RethrowsToken',
]),
Child('Output', kind='ReturnClause', is_optional=True),
]),
# if-config-clause ->
# ('#if' | '#elseif' | '#else') expr? (stmt-list | switch-case-list)
Node('IfConfigClause', kind='Syntax',
children=[
Child('PoundKeyword', kind='Token',
classification='BuildConfigId',
token_choices=[
'PoundIfToken',
'PoundElseifToken',
'PoundElseToken',
]),
Child('Condition', kind='Expr', classification='BuildConfigId',
is_optional=True),
Child('Elements', kind='Syntax',
node_choices=[
Child('Statements', kind='CodeBlockItemList'),
Child('SwitchCases', kind='SwitchCaseList'),
Child('Decls', kind='MemberDeclList'),
]),
]),
Node('IfConfigClauseList', kind='SyntaxCollection',
element='IfConfigClause'),
# if-config-decl -> '#if' expr stmt-list else-if-directive-clause-list
# else-clause? '#endif'
Node('IfConfigDecl', kind='Decl',
children=[
Child('Clauses', kind='IfConfigClauseList'),
Child('PoundEndif', kind='PoundEndifToken',
classification='BuildConfigId'),
]),
Node('PoundErrorDecl', kind='Decl',
traits=['Parenthesized'],
children=[
Child('PoundError', kind='PoundErrorToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Message', kind='StringLiteralExpr'),
Child('RightParen', kind='RightParenToken')
]),
Node('PoundWarningDecl', kind='Decl',
traits=['Parenthesized'],
children=[
Child('PoundWarning', kind='PoundWarningToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Message', kind='StringLiteralExpr'),
Child('RightParen', kind='RightParenToken')
]),
Node('PoundSourceLocation', kind='Decl',
traits=['Parenthesized'],
children=[
Child('PoundSourceLocation', kind='PoundSourceLocationToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Args', kind='PoundSourceLocationArgs', is_optional=True),
Child('RightParen', kind='RightParenToken')
]),
Node('PoundSourceLocationArgs', kind='Syntax',
children=[
Child('FileArgLabel', kind='IdentifierToken',
text_choices=['file']),
Child('FileArgColon', kind='ColonToken'),
Child('FileName', kind='StringLiteralToken'),
Child('Comma', kind='CommaToken'),
Child('LineArgLabel', kind='IdentifierToken',
text_choices=['line']),
Child('LineArgColon', kind='ColonToken'),
Child('LineNumber', kind='IntegerLiteralToken'),
]),
Node('DeclModifier', kind='Syntax',
children=[
Child('Name', kind='Token', classification='Attribute',
text_choices=[
'class', 'convenience', 'dynamic', 'final', 'infix',
'lazy', 'optional', 'override', 'postfix', 'prefix',
'required', 'static', 'unowned', 'weak', 'private',
'fileprivate', 'internal', 'public', 'open',
'mutating', 'nonmutating', 'indirect', '__consuming'
]),
Child('DetailLeftParen', kind='LeftParenToken', is_optional=True),
Child('Detail', kind='IdentifierToken', is_optional=True),
Child('DetailRightParen', kind='RightParenToken', is_optional=True),
]),
Node('InheritedType', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('TypeName', kind='Type'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('InheritedTypeList', kind='SyntaxCollection',
element='InheritedType'),
# type-inheritance-clause -> ':' type
Node('TypeInheritanceClause', kind='Syntax',
children=[
Child('Colon', kind='ColonToken'),
Child('InheritedTypeCollection', kind='InheritedTypeList'),
]),
# class-declaration -> attributes? access-level-modifier?
# 'class' class-name
# generic-parameter-clause?
# type-inheritance-clause?
# generic-where-clause?
# '{' class-members '}'
# class-name -> identifier
Node('ClassDecl', kind='Decl',
traits=['DeclGroup', 'IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('ClassKeyword', kind='ClassToken'),
Child('Identifier', kind='IdentifierToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
# struct-declaration -> attributes? access-level-modifier?
# 'struct' struct-name
# generic-parameter-clause?
# type-inheritance-clause?
# generic-where-clause?
# '{' struct-members '}'
# struct-name -> identifier
Node('StructDecl', kind='Decl',
traits=['DeclGroup', 'IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('StructKeyword', kind='StructToken'),
Child('Identifier', kind='IdentifierToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
Node('ProtocolDecl', kind='Decl',
traits=['DeclGroup', 'IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('ProtocolKeyword', kind='ProtocolToken'),
Child('Identifier', kind='IdentifierToken'),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
# extension-declaration -> attributes? access-level-modifier?
# 'extension' extended-type
# type-inheritance-clause?
# generic-where-clause?
# '{' extension-members '}'
# extension-name -> identifier
Node('ExtensionDecl', kind='Decl', traits=['DeclGroup'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('ExtensionKeyword', kind='ExtensionToken'),
Child('ExtendedType', kind='Type'),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
Node('MemberDeclBlock', kind='Syntax', traits=['Braced'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Members', kind='MemberDeclList'),
Child('RightBrace', kind='RightBraceToken'),
]),
# member-decl-list = member-decl member-decl-list?
Node('MemberDeclList', kind='SyntaxCollection',
element='MemberDeclListItem'),
# member-decl = decl ';'?
Node('MemberDeclListItem', kind='Syntax',
description='''
A member declaration of a type consisting of a declaration and an \
optional semicolon;
''',
children=[
Child('Decl', kind='Decl',
description='The declaration of the type member.'),
Child('Semicolon', kind='SemicolonToken', is_optional=True,
description='An optional trailing semicolon.'),
]),
# source-file = code-block-item-list eof
Node('SourceFile', kind='Syntax',
traits=['WithStatements'],
children=[
Child('Statements', kind='CodeBlockItemList'),
Child('EOFToken', kind='EOFToken')
]),
# initializer -> '=' expr
Node('InitializerClause', kind='Syntax',
children=[
Child('Equal', kind='EqualToken'),
Child('Value', kind='Expr'),
]),
# parameter ->
# external-parameter-name? local-parameter-name ':'
# type '...'? '='? expression? ','?
Node('FunctionParameter', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('FirstName', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
],
is_optional=True),
             # One of these two names needs to be optional; we choose the second
             # name to avoid backtracking.
Child('SecondName', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
],
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Type', kind='Type',
is_optional=True),
Child('Ellipsis', kind='Token',
is_optional=True),
Child('DefaultArgument', kind='InitializerClause',
is_optional=True),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# declaration-modifier -> access-level-modifier
# | mutation-modifier
# | 'class'
# | 'convenience'
# | 'dynamic'
# | 'final'
# | 'infix'
# | 'lazy'
# | 'optional'
# | 'override'
# | 'postfix'
# | 'prefix'
# | 'required'
# | 'static'
# | 'unowned'
# | 'unowned(safe)'
# | 'unowned(unsafe)'
# | 'weak'
# mutation-modifier -> 'mutating' | 'nonmutating'
Node('ModifierList', kind='SyntaxCollection',
element='DeclModifier',
element_name='Modifier'),
Node('FunctionDecl', kind='Decl', traits=['IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('FuncKeyword', kind='FuncToken'),
Child('Identifier', kind='Token',
token_choices=[
'IdentifierToken',
'UnspacedBinaryOperatorToken',
'SpacedBinaryOperatorToken',
'PrefixOperatorToken',
'PostfixOperatorToken',
]),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Signature', kind='FunctionSignature'),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
# the body is not necessary inside a protocol definition
Child('Body', kind='CodeBlock', is_optional=True),
]),
Node('InitializerDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('InitKeyword', kind='InitToken'),
Child('OptionalMark', kind='Token',
token_choices=[
'PostfixQuestionMarkToken',
'InfixQuestionMarkToken',
'ExclamationMarkToken',
],
is_optional=True),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Parameters', kind='ParameterClause'),
Child('ThrowsOrRethrowsKeyword', kind='Token',
is_optional=True,
token_choices=[
'ThrowsToken',
'RethrowsToken',
]),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
# the body is not necessary inside a protocol definition
Child('Body', kind='CodeBlock', is_optional=True),
]),
Node('DeinitializerDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('DeinitKeyword', kind='DeinitToken'),
Child('Body', kind='CodeBlock'),
]),
Node('SubscriptDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('SubscriptKeyword', kind='SubscriptToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Indices', kind='ParameterClause'),
Child('Result', kind='ReturnClause'),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
# the body is not necessary inside a protocol definition
Child('Accessor', kind='AccessorBlock', is_optional=True),
]),
# access-level-modifier -> 'private' | 'private' '(' 'set' ')'
# | 'fileprivate' | 'fileprivate' '(' 'set' ')'
# | 'internal' | 'internal' '(' 'set' ')'
# | 'public' | 'public' '(' 'set' ')'
# | 'open' | 'open' '(' 'set' ')'
Node('AccessLevelModifier', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('Modifier', kind='IdentifierToken',
is_optional=True),
Child('RightParen', kind='RightParenToken',
is_optional=True),
]),
Node('AccessPathComponent', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('TrailingDot', kind='PeriodToken', is_optional=True),
]),
Node('AccessPath', kind='SyntaxCollection', element='AccessPathComponent'),
Node('ImportDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList', is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('ImportTok', kind='ImportToken'),
Child('ImportKind', kind='Token', is_optional=True,
token_choices=[
'TypealiasToken', 'StructToken', 'ClassToken',
'EnumToken', 'ProtocolToken', 'VarToken', 'LetToken',
'FuncToken',
]),
Child('Path', kind='AccessPath'),
]),
# (value)
Node('AccessorParameter', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Name', kind='IdentifierToken'),
Child('RightParen', kind='RightParenToken'),
]),
Node('AccessorDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList', is_optional=True),
Child('Modifier', kind='DeclModifier', is_optional=True),
Child('AccessorKind', kind='Token',
text_choices=[
'get', 'set', 'didSet', 'willSet', 'unsafeAddress',
'addressWithOwner', 'addressWithNativeOwner',
'addressWithPinnedNativeOwner', 'unsafeMutableAddress',
'mutableAddressWithOwner',
'mutableAddressWithNativeOwner',
'mutableAddressWithPinnedNativeOwner',
'_read', '_modify'
]),
Child('Parameter', kind='AccessorParameter', is_optional=True),
Child('Body', kind='CodeBlock', is_optional=True),
]),
Node('AccessorList', kind="SyntaxCollection", element='AccessorDecl'),
Node('AccessorBlock', kind="Syntax", traits=['Braced'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('AccessorListOrStmtList', kind='Syntax',
node_choices=[
Child('Accessors', kind='AccessorList'),
Child('Statements', kind='CodeBlockItemList')]),
Child('RightBrace', kind='RightBraceToken'),
]),
# Pattern: Type = Value { get {} },
Node('PatternBinding', kind="Syntax",
traits=['WithTrailingComma'],
children=[
Child('Pattern', kind='Pattern'),
Child('TypeAnnotation', kind='TypeAnnotation', is_optional=True),
Child('Initializer', kind='InitializerClause', is_optional=True),
Child('Accessor', kind='AccessorBlock', is_optional=True),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('PatternBindingList', kind="SyntaxCollection",
element='PatternBinding'),
Node('VariableDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList', is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('LetOrVarKeyword', kind='Token',
token_choices=[
'LetToken', 'VarToken',
]),
Child('Bindings', kind='PatternBindingList'),
]),
Node('EnumCaseElement', kind='Syntax',
description='''
An element of an enum case, containing the name of the case and, \
optionally, either associated values or an assignment to a raw value.
''',
traits=['WithTrailingComma'],
children=[
Child('Identifier', kind='IdentifierToken',
description='The name of this case.'),
Child('AssociatedValue', kind='ParameterClause', is_optional=True,
description='The set of associated values of the case.'),
Child('RawValue', kind='InitializerClause', is_optional=True,
description='''
The raw value of this enum element, if present.
'''),
Child('TrailingComma', kind='CommaToken', is_optional=True,
description='''
The trailing comma of this element, if the case has \
multiple elements.
'''),
]),
Node('EnumCaseElementList', kind='SyntaxCollection',
description='A collection of 0 or more `EnumCaseElement`s.',
element='EnumCaseElement'),
Node('EnumCaseDecl', kind='Decl',
description='''
A `case` declaration of a Swift `enum`. It can have 1 or more \
`EnumCaseElement`s inside, each declaring a different case of the
enum.
''',
children=[
Child('Attributes', kind='AttributeList', is_optional=True,
description='''
The attributes applied to the case declaration.
'''),
Child('Modifiers', kind='ModifierList', is_optional=True,
description='''
The declaration modifiers applied to the case declaration.
'''),
Child('CaseKeyword', kind='CaseToken',
description='The `case` keyword for this case.'),
Child('Elements', kind='EnumCaseElementList',
description='The elements this case declares.')
]),
Node('EnumDecl', kind='Decl', traits=['IdentifiedDecl'],
description='A Swift `enum` declaration.',
children=[
Child('Attributes', kind='AttributeList', is_optional=True,
description='''
The attributes applied to the enum declaration.
'''),
Child('Modifiers', kind='ModifierList', is_optional=True,
description='''
The declaration modifiers applied to the enum declaration.
'''),
Child('EnumKeyword', kind='EnumToken',
description='''
The `enum` keyword for this declaration.
'''),
Child('Identifier', kind='IdentifierToken',
description='''
The name of this enum.
'''),
Child('GenericParameters', kind='GenericParameterClause',
is_optional=True,
description='''
The generic parameters, if any, for this enum.
'''),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True,
description='''
The inheritance clause describing conformances or raw \
values for this enum.
'''),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True,
description='''
The `where` clause that applies to the generic parameters of \
this enum.
'''),
Child('Members', kind='MemberDeclBlock',
description='''
The cases and other members of this enum.
''')
]),
# operator-decl -> attribute? modifiers? 'operator' operator
Node('OperatorDecl', kind='Decl', traits=['IdentifiedDecl'],
description='A Swift `operator` declaration.',
children=[
Child('Attributes', kind='AttributeList', is_optional=True,
description='''
The attributes applied to the 'operator' declaration.
'''),
Child('Modifiers', kind='ModifierList', is_optional=True,
classification='Attribute',
description='''
The declaration modifiers applied to the 'operator'
declaration.
'''),
Child('OperatorKeyword', kind='OperatorToken'),
Child('Identifier', kind='Token',
token_choices=[
'UnspacedBinaryOperatorToken',
'SpacedBinaryOperatorToken',
'PrefixOperatorToken',
'PostfixOperatorToken',
]),
Child('InfixOperatorGroup', kind='InfixOperatorGroup',
description='''
Optionally specify a precedence group
''',
is_optional=True),
]),
# infix-operator-group -> ':' identifier
Node('InfixOperatorGroup', kind='Syntax',
description='''
A clause to specify precedence group in infix operator declaration.
''',
children=[
Child('Colon', kind='ColonToken'),
Child('PrecedenceGroupName', kind='IdentifierToken',
description='''
The name of the precedence group for the operator
'''),
]),
# precedence-group-decl -> attributes? modifiers? 'precedencegroup'
# identifier '{' precedence-group-attribute-list
# '}'
Node('PrecedenceGroupDecl', kind='Decl', traits=['IdentifiedDecl'],
description='A Swift `precedencegroup` declaration.',
children=[
Child('Attributes', kind='AttributeList', is_optional=True,
description='''
The attributes applied to the 'precedencegroup' declaration.
'''),
Child('Modifiers', kind='ModifierList', is_optional=True,
description='''
The declaration modifiers applied to the 'precedencegroup'
declaration.
'''),
Child('PrecedencegroupKeyword', kind='PrecedencegroupToken'),
Child('Identifier', kind='IdentifierToken',
description='''
The name of this precedence group.
'''),
Child('LeftBrace', kind='LeftBraceToken'),
Child('GroupAttributes', kind='PrecedenceGroupAttributeList',
description='''
The characteristics of this precedence group.
'''),
Child('RightBrace', kind='RightBraceToken'),
]),
# precedence-group-attribute-list ->
# (precedence-group-relation | precedence-group-assignment |
# precedence-group-associativity )*
Node('PrecedenceGroupAttributeList', kind='SyntaxCollection',
element='Syntax',
element_choices=[
'PrecedenceGroupRelation',
'PrecedenceGroupAssignment',
'PrecedenceGroupAssociativity'
]),
# precedence-group-relation ->
# ('higherThan' | 'lowerThan') ':' precedence-group-name-list
Node('PrecedenceGroupRelation', kind='Syntax',
description='''
Specify the new precedence group's relation to existing precedence
groups.
''',
children=[
Child('HigherThanOrLowerThan', kind='IdentifierToken',
classification='Keyword',
text_choices=[
'higherThan', 'lowerThan',
],
description='''
The relation to specified other precedence groups.
'''),
Child('Colon', kind='ColonToken'),
Child('OtherNames', kind='PrecedenceGroupNameList',
description='''
The name of other precedence group to which this precedence
group relates.
'''),
]),
# precedence-group-name-list ->
# identifier (',' identifier)*
Node('PrecedenceGroupNameList', kind='SyntaxCollection',
element='PrecedenceGroupNameElement'),
Node('PrecedenceGroupNameElement', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# precedence-group-assignment ->
# 'assignment' ':' ('true' | 'false')
Node('PrecedenceGroupAssignment', kind='Syntax',
description='''
Specifies the precedence of an operator when used in an operation
that includes optional chaining.
''',
children=[
Child('AssignmentKeyword', kind='IdentifierToken',
text_choices=['assignment']),
Child('Colon', kind='ColonToken'),
Child('Flag', kind='Token',
token_choices=[
'TrueToken',
'FalseToken',
],
description='''
When true, an operator in the corresponding precedence group
uses the same grouping rules during optional chaining as the
assignment operators from the standard library. Otherwise,
                   operators in the precedence group follow the same optional
chaining rules as operators that don't perform assignment.
'''),
]),
# precedence-group-associativity ->
# 'associativity' ':' ('left' | 'right' | 'none')
Node('PrecedenceGroupAssociativity', kind='Syntax',
description='''
Specifies how a sequence of operators with the same precedence level
are grouped together in the absence of grouping parentheses.
''',
children=[
Child('AssociativityKeyword', kind='IdentifierToken',
classification='Keyword', text_choices=['associativity']),
Child('Colon', kind='ColonToken'),
Child('Value', kind='IdentifierToken',
text_choices=['left', 'right', 'none'],
description='''
Operators that are `left`-associative group left-to-right.
Operators that are `right`-associative group right-to-left.
Operators that are specified with an associativity of `none`
don't associate at all
'''),
]),
]
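# A purely illustrative, standalone example of the Node/Child declaration pattern
# used in DECL_NODES above. 'ExampleDecl' is not part of the Swift grammar and is
# deliberately not appended to DECL_NODES; it only shows how a declaration node is
# described: an optional attribute list, an identifier, and an optional body.
def _example_decl_node():
    """Return a hypothetical Node following the same declaration pattern."""
    return Node('ExampleDecl', kind='Decl', traits=['IdentifiedDecl'],
                children=[
                    Child('Attributes', kind='AttributeList', is_optional=True),
                    Child('Identifier', kind='IdentifierToken'),
                    Child('Body', kind='CodeBlock', is_optional=True),
                ])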
|
|
#!/usr/bin/env python
"""
Transport layer abstractions
TODOS:
- split listen() into two subcalls (for StreamSubscriber)
"""
__author__ = 'Dave Foster <[email protected]>'
from pyon.util.log import log
from pyon.util.containers import DotDict
from gevent.event import AsyncResult, Event
from gevent.queue import Queue
from gevent import coros, sleep
from gevent.timeout import Timeout
from gevent.pool import Pool
from contextlib import contextmanager
import os
from pika import BasicProperties
from pyon.util.async import spawn
from pyon.util.pool import IDPool
from uuid import uuid4
from collections import defaultdict
class TransportError(StandardError):
pass
class BaseTransport(object):
def declare_exchange_impl(self, exchange, **kwargs):
raise NotImplementedError()
def delete_exchange_impl(self, exchange, **kwargs):
raise NotImplementedError()
def declare_queue_impl(self, queue, **kwargs):
raise NotImplementedError()
def delete_queue_impl(self, queue, **kwargs):
raise NotImplementedError()
def bind_impl(self, exchange, queue, binding):
raise NotImplementedError()
def unbind_impl(self, exchange, queue, binding):
raise NotImplementedError()
def ack_impl(self, delivery_tag):
raise NotImplementedError()
def reject_impl(self, delivery_tag, requeue=False):
raise NotImplementedError()
def start_consume_impl(self, callback, queue, no_ack=False, exclusive=False):
raise NotImplementedError()
def stop_consume_impl(self, consumer_tag):
raise NotImplementedError()
def setup_listener(self, binding, default_cb):
raise NotImplementedError()
def get_stats_impl(self, queue):
raise NotImplementedError()
def purge_impl(self, queue):
raise NotImplementedError()
def qos_impl(self, prefetch_size=0, prefetch_count=0, global_=False):
raise NotImplementedError()
def publish_impl(self, exchange, routing_key, body, properties, immediate=False, mandatory=False, durable_msg=False):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
@property
def channel_number(self):
raise NotImplementedError()
@property
def active(self):
raise NotImplementedError()
def add_on_close_callback(self, cb):
raise NotImplementedError()
class ComposableTransport(BaseTransport):
"""
A Transport that has its methods composed of two or more transports.
    This is used for ExchangeObjects, where we want to compose the container ex_manager's authoritative
    transport with a transport unique to the XO; the latter is needed for the following methods:
- ack_impl
- reject_impl
- start_consume_impl
- stop_consume_impl
- qos_impl
- get_stats_impl
- publish_impl (solely for publish rates, not needed for identity in protocol)
"""
common_methods = ['ack_impl',
'reject_impl',
'start_consume_impl',
'stop_consume_impl',
'qos_impl',
'get_stats_impl',
'publish_impl']
def __init__(self, left, right, *methods):
self._transports = [left]
log.debug("ComposableTransport.__init__(%s) %s %s", self.channel_number, type(left), left)
self._methods = { 'declare_exchange_impl': left.declare_exchange_impl,
'delete_exchange_impl' : left.delete_exchange_impl,
'declare_queue_impl' : left.declare_queue_impl,
'delete_queue_impl' : left.delete_queue_impl,
'bind_impl' : left.bind_impl,
'unbind_impl' : left.unbind_impl,
'ack_impl' : left.ack_impl,
'reject_impl' : left.reject_impl,
'start_consume_impl' : left.start_consume_impl,
'stop_consume_impl' : left.stop_consume_impl,
'setup_listener' : left.setup_listener,
'get_stats_impl' : left.get_stats_impl,
'purge_impl' : left.purge_impl,
'qos_impl' : left.qos_impl,
'publish_impl' : left.publish_impl, }
if right is not None:
self.overlay(right, *methods)
self._close_callbacks = []
def overlay(self, transport, *methods):
for m in methods:
self._methods[m] = getattr(transport, m)
log.debug("ComposableTransport.overlay(%s) %s %s (%s)", self.channel_number, type(transport), transport, transport.channel_number)
self._transports.append(transport)
def declare_exchange_impl(self, exchange, **kwargs):
m = self._methods['declare_exchange_impl']
return m(exchange, **kwargs)
def delete_exchange_impl(self, exchange, **kwargs):
m = self._methods['delete_exchange_impl']
return m(exchange, **kwargs)
def declare_queue_impl(self, queue, **kwargs):
m = self._methods['declare_queue_impl']
return m(queue, **kwargs)
def delete_queue_impl(self, queue, **kwargs):
m = self._methods['delete_queue_impl']
return m(queue, **kwargs)
def bind_impl(self, exchange, queue, binding):
m = self._methods['bind_impl']
return m(exchange, queue, binding)
def unbind_impl(self, exchange, queue, binding):
m = self._methods['unbind_impl']
return m(exchange, queue, binding)
def ack_impl(self, delivery_tag):
m = self._methods['ack_impl']
return m(delivery_tag)
def reject_impl(self, delivery_tag, requeue=False):
m = self._methods['reject_impl']
return m(delivery_tag, requeue=requeue)
def start_consume_impl(self, callback, queue, no_ack=False, exclusive=False):
m = self._methods['start_consume_impl']
return m(callback, queue, no_ack=no_ack, exclusive=exclusive)
def stop_consume_impl(self, consumer_tag):
m = self._methods['stop_consume_impl']
return m(consumer_tag)
def setup_listener(self, binding, default_cb):
m = self._methods['setup_listener']
return m(binding, default_cb)
def get_stats_impl(self, queue):
m = self._methods['get_stats_impl']
return m(queue)
def purge_impl(self, queue):
m = self._methods['purge_impl']
return m(queue)
def qos_impl(self, prefetch_size=0, prefetch_count=0, global_=False):
m = self._methods['qos_impl']
return m(prefetch_size=prefetch_size, prefetch_count=prefetch_count, global_=global_)
def publish_impl(self, exchange, routing_key, body, properties, immediate=False, mandatory=False, durable_msg=False):
m = self._methods['publish_impl']
return m(exchange, routing_key, body, properties, immediate=immediate, mandatory=mandatory, durable_msg=durable_msg)
def close(self):
for t in self._transports:
t.close()
for cb in self._close_callbacks:
cb(self, 200, "Closed OK") # @TODO where to get real value
@property
def channel_number(self):
return self._transports[-1].channel_number
def add_on_close_callback(self, cb):
self._close_callbacks.append(cb)
@property
def active(self):
return all(map(lambda x: x.active, self._transports))
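# A minimal sketch (not used by the container) of how ComposableTransport overlays
# methods: everything defaults to the "left" transport, while the listed method
# names are re-bound to the "right" transport. The stub class below is hypothetical
# and only defines what __init__()/overlay() actually touch.
class _StubTransport(BaseTransport):
    def __init__(self, name, channel):
        self._name = name
        self._channel = channel
    @property
    def channel_number(self):
        return self._channel
    def publish_impl(self, exchange, routing_key, body, properties,
                     immediate=False, mandatory=False, durable_msg=False):
        # Record which underlying transport actually handled the publish.
        return (self._name, exchange, routing_key)
def _composable_transport_sketch():
    left = _StubTransport('authoritative', channel=1)
    right = _StubTransport('private', channel=2)
    composed = ComposableTransport(left, right, 'publish_impl')
    # publish_impl was overlaid, so this returns ('private', 'ex', 'key').
    return composed.publish_impl('ex', 'key', 'body', {})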
class AMQPTransport(BaseTransport):
"""
A transport adapter around a Pika channel.
"""
def __init__(self, amq_chan):
"""
Creates an AMQPTransport, bound to an underlying Pika channel.
"""
#log.info("AMQPTransport(%d)", amq_chan.channel_number)
self._client = amq_chan
self._client.add_on_close_callback(self._on_underlying_close)
self._close_callbacks = []
self.lock = False
def _on_underlying_close(self, code, text):
if not (code == 0 or code == 200):
log.error("AMQPTransport.underlying closed:\n\tchannel number: %s\n\tcode: %d\n\ttext: %s", self.channel_number, code, text)
# PIKA BUG: in v0.9.5, this amq_chan instance will be left around in the callbacks
# manager, and trips a bug in the handler for on_basic_deliver. We attempt to clean
# up for Pika here so we don't goof up when reusing a channel number.
# this appears to be fixed in 3050d116899aced2392def2e3e66ca30c93334ac
# https://github.com/pika/pika/commit/e93c7ebae2c57b798977ba2992602310deb4758b
self._client.callbacks.remove(self._client.channel_number, 'Basic.GetEmpty')
self._client.callbacks.remove(self._client.channel_number, 'Channel.Close')
self._client.callbacks.remove(self._client.channel_number, '_on_basic_deliver')
self._client.callbacks.remove(self._client.channel_number, '_on_basic_get')
# uncomment these lines to see the full callback list that Pika maintains
#stro = pprint.pformat(callbacks._callbacks)
#log.error(str(stro))
for cb in self._close_callbacks:
cb(self, code, text)
@property
def active(self):
if self._client is not None:
if self._client.closing is None:
return True
return False
def close(self):
if self.lock:
return
self._client.close()
@property
def channel_number(self):
return self._client.channel_number
def add_on_close_callback(self, cb):
self._close_callbacks.append(cb)
@contextmanager
def _push_close_cb(self, callback):
self._client.add_on_close_callback(callback)
try:
yield callback
finally:
# PIKA BUG: v0.9.5, we need to specify the callback as a dict - this is fixed in git HEAD (13 Feb 2012)
de = {'handle': callback, 'one_shot': True}
self._client.callbacks.remove(self._client.channel_number, '_on_channel_close', de)
def _sync_call(self, func, cb_arg, *args, **kwargs):
"""
Functionally similar to the generic blocking_cb but with error support that's Channel specific.
"""
ar = AsyncResult()
def cb(*args, **kwargs):
ret = list(args)
if len(kwargs): ret.append(kwargs)
ar.set(ret)
eb = lambda ch, *args: ar.set(TransportError("_sync_call could not complete due to an error (%s)" % args))
kwargs[cb_arg] = cb
with self._push_close_cb(eb):
func(*args, **kwargs)
        # Note: MM (2014-04-03): It seems that gevent blocking or something else can lead to this
        # timeout being hit. Timeout increased from 10 to 20 seconds.
ret_vals = ar.get(timeout=20)
if isinstance(ret_vals, TransportError):
# mark this channel as poison, do not use again!
# don't test for type here, we don't want to have to import PyonSelectConnection
if hasattr(self._client.transport, 'connection') and hasattr(self._client.transport.connection, 'mark_bad_channel'):
self._client.transport.connection.mark_bad_channel(self._client.channel_number)
else:
log.warn("Could not mark channel # (%s) as bad, Pika could be corrupt", self._client.channel_number)
raise ret_vals
if len(ret_vals) == 0:
return None
elif len(ret_vals) == 1:
return ret_vals[0]
return tuple(ret_vals)
def declare_exchange_impl(self, exchange, exchange_type='topic', durable=False, auto_delete=True):
#log.debug("AMQPTransport.declare_exchange_impl(%s): %s, T %s, D %s, AD %s", self._client.channel_number, exchange, exchange_type, durable, auto_delete)
arguments = {}
if os.environ.get('QUEUE_BLAME', None) is not None:
testid = os.environ['QUEUE_BLAME']
arguments.update({'created-by': testid})
self._sync_call(self._client.exchange_declare, 'callback',
exchange=exchange,
type=exchange_type,
durable=durable,
auto_delete=auto_delete,
arguments=arguments)
def delete_exchange_impl(self, exchange, **kwargs):
log.debug("AMQPTransport.delete_exchange_impl(%s): %s", self._client.channel_number, exchange)
self._sync_call(self._client.exchange_delete, 'callback', exchange=exchange)
def declare_queue_impl(self, queue, durable=False, auto_delete=True):
#log.debug("AMQPTransport.declare_queue_impl(%s): %s, D %s, AD %s", self._client.channel_number, queue, durable, auto_delete)
arguments = {}
if os.environ.get('QUEUE_BLAME', None) is not None:
testid = os.environ['QUEUE_BLAME']
arguments.update({'created-by': testid})
frame = self._sync_call(self._client.queue_declare, 'callback',
queue=queue or '',
auto_delete=auto_delete,
durable=durable,
arguments=arguments)
return frame.method.queue
def delete_queue_impl(self, queue, **kwargs):
log.debug("AMQPTransport.delete_queue_impl(%s): %s", self._client.channel_number, queue)
self._sync_call(self._client.queue_delete, 'callback', queue=queue)
def bind_impl(self, exchange, queue, binding):
#log.debug("AMQPTransport.bind_impl(%s): EX %s, Q %s, B %s", self._client.channel_number, exchange, queue, binding)
self._sync_call(self._client.queue_bind, 'callback',
queue=queue,
exchange=exchange,
routing_key=binding)
def unbind_impl(self, exchange, queue, binding):
#log.debug("AMQPTransport.unbind_impl(%s): EX %s, Q %s, B %s", self._client.channel_number, exchange, queue, binding)
self._sync_call(self._client.queue_unbind, 'callback', queue=queue,
exchange=exchange,
routing_key=binding)
def ack_impl(self, delivery_tag):
"""
Acks a message.
"""
#log.debug("AMQPTransport.ack(%s): %s", self._client.channel_number, delivery_tag)
self._client.basic_ack(delivery_tag)
def reject_impl(self, delivery_tag, requeue=False):
"""
Rejects a message.
"""
self._client.basic_reject(delivery_tag, requeue=requeue)
def start_consume_impl(self, callback, queue, no_ack=False, exclusive=False):
"""
Starts consuming on a queue.
Will asynchronously deliver messages to the callback method supplied.
@return A consumer tag to be used when stop_consume_impl is called.
"""
#log.debug("AMQPTransport.start_consume_impl(%s): %s", self._client.channel_number, queue)
consumer_tag = self._client.basic_consume(callback,
queue=queue,
no_ack=no_ack,
exclusive=exclusive)
return consumer_tag
def stop_consume_impl(self, consumer_tag):
"""
Stops consuming by consumer tag.
"""
#log.debug("AMQPTransport.stop_consume_impl(%s): %s", self._client.channel_number, consumer_tag)
self._sync_call(self._client.basic_cancel, 'callback', consumer_tag)
# PIKA 0.9.5 / GEVENT interaction problem here
# we get called back too early, the basic_cancel hasn't really finished processing yet. we need
# to wait until our consumer tag is removed from the pika channel's consumers dict.
# See: https://gist.github.com/3751870
attempts = 5
while attempts > 0:
if consumer_tag not in self._client._consumers:
break
else:
log.debug("stop_consume_impl waiting for ctag to be removed from consumers, attempts rem: %s", attempts)
attempts -= 1
sleep(1)
if consumer_tag in self._client._consumers:
raise TransportError("stop_consume_impl did not complete in the expected amount of time, transport may be compromised")
def setup_listener(self, binding, default_cb):
"""
Calls setup listener via the default callback passed in.
"""
return default_cb(self, binding)
def get_stats_impl(self, queue):
"""
Gets a tuple of number of messages, number of consumers on a queue.
"""
log.debug("AMQPTransport.get_stats_impl(%s): Q %s", self._client.channel_number, queue)
frame = self._sync_call(self._client.queue_declare, 'callback',
queue=queue or '',
passive=True)
return frame.method.message_count, frame.method.consumer_count
def purge_impl(self, queue):
"""
Purges a queue.
"""
log.debug("AMQPTransport.purge_impl(%s): Q %s", self._client.channel_number, queue)
self._sync_call(self._client.queue_purge, 'callback', queue=queue)
def qos_impl(self, prefetch_size=0, prefetch_count=0, global_=False):
"""
Adjusts quality of service for a channel.
"""
#log.debug("AMQPTransport.qos_impl(%s): pf_size %s, pf_count %s, global_ %s", self._client.channel_number, prefetch_size, prefetch_count, global_)
self._sync_call(self._client.basic_qos, 'callback', prefetch_size=prefetch_size, prefetch_count=prefetch_count, global_=global_)
def publish_impl(self, exchange, routing_key, body, properties, immediate=False, mandatory=False, durable_msg=False):
"""
Publishes a message on an exchange.
"""
#log.debug("AMQPTransport.publish(%s): ex %s key %s", self._client.channel_number, exchange, routing_key)
#log.debug("AMQPTransport.publish(%s): ex %s key %s, size %d", self._client.channel_number, exchange, routing_key, len(repr(body))+len(repr(properties)))
if durable_msg:
delivery_mode = 2
else:
delivery_mode = None
props = BasicProperties(headers=properties,
delivery_mode=delivery_mode)
self._client.basic_publish(exchange=exchange, # todo
routing_key=routing_key, # todo
body=body,
properties=props,
immediate=immediate, # todo
mandatory=mandatory) # todo
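# A stripped-down sketch of the AsyncResult-based blocking-callback pattern that
# AMQPTransport._sync_call builds on: hand the async function a callback that
# fulfills an AsyncResult, then block the calling greenlet until it fires (or the
# get() times out). Error handling and the Pika-specific cleanup are omitted; the
# helper name is hypothetical.
def _blocking_cb_sketch(async_func, cb_arg, *args, **kwargs):
    ar = AsyncResult()
    kwargs[cb_arg] = ar.set          # the async API will call this with its result
    async_func(*args, **kwargs)
    return ar.get(timeout=20)        # block this greenlet until the callback fires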
class NameTrio(object):
"""
Internal representation of a name/queue/binding (optional).
Created and used at the Endpoint layer and sometimes Channel layer.
"""
def __init__(self, exchange=None, queue=None, binding=None):
"""
Creates a NameTrio.
        If either exchange or queue is a tuple, it is used as an (exchange, queue, binding) triple, where binding is optional.
@param exchange An exchange name. You would typically use the sysname for that.
@param queue Queue name.
@param binding A binding/routing key (used for both recv and send sides). Optional,
and if not specified, defaults to the *internal* queue name.
"""
if isinstance(exchange, tuple):
self._exchange, self._queue, self._binding = list(exchange) + ([None] * (3 - len(exchange)))
elif isinstance(queue, tuple):
self._exchange, self._queue, self._binding = list(queue) + ([None] * (3 - len(queue)))
else:
self._exchange = exchange
self._queue = queue
self._binding = binding
@property
def exchange(self):
return self._exchange
@property
def queue(self):
return self._queue
@property
def binding(self):
return self._binding or self._queue
def __str__(self):
return "NP (%s,%s,B: %s)" % (self.exchange, self.queue, self.binding)
class TopicTrie(object):
"""
Support class for building a routing device to do amqp-like pattern matching.
Used for events/pubsub in our system with the local transport. Efficiently stores all registered
subscription topic trees in a trie structure, handling wildcards * and #.
See:
http://www.zeromq.org/whitepapers:message-matching (doesn't handle # so scrapped)
http://www.rabbitmq.com/blog/2010/09/14/very-fast-and-scalable-topic-routing-part-1/
http://www.rabbitmq.com/blog/2011/03/28/very-fast-and-scalable-topic-routing-part-2/
"""
class Node(object):
"""
Internal node of a trie.
        Stores two data points: a token (a literal string, '*', '#', or None if used as the root element),
        and a list of "patterns", i.e. references to objects representing binds.
"""
def __init__(self, token, patterns=None):
self.token = token
self.patterns = patterns or []
self.children = {}
def get_or_create_child(self, token):
"""
Returns a child node with the given token.
If it doesn't already exist, it is created, otherwise the existing one is returned.
"""
if token in self.children:
return self.children[token]
new_node = TopicTrie.Node(token)
self.children[token] = new_node
return new_node
def get_all_matches(self, topics):
"""
Given a list of topic tokens, returns all patterns stored in child nodes/self that match the topic tokens.
This is a depth-first search pruned by token, with special handling for both wildcard types.
"""
results = []
if len(topics) == 0:
# terminal point, return any pattern we have here
return self.patterns
cur_token = topics[0]
rem_tokens = topics[1:] # will always be a list, even if empty or 1-len
#log.debug('get_all_matches(%s): cur_token %s, rem_tokens %s', self.token, cur_token, rem_tokens)
# child node direct matching
if cur_token in self.children:
results.extend(self.children[cur_token].get_all_matches(rem_tokens))
# now '*' wildcard
if '*' in self.children:
results.extend(self.children['*'].get_all_matches(rem_tokens))
            # '#' matches any number of remaining tokens. Naive descent: try the '#' child against every
            # suffix of the topic list, collect the results into a set to drop duplicates, and also accept
            # any patterns stored directly on the '#' child (covering the case where '#' consumes everything left).
if '#' in self.children:
# keep popping off and descend, make a set out of results
all_wild_childs = set()
                for i in range(len(topics)):
                    res = self.children['#'].get_all_matches(topics[i:])
                    all_wild_childs.update(res)
results.extend(all_wild_childs)
results.extend(self.children['#'].patterns) # any patterns defined in # are legal too
return results
def __init__(self):
"""
Creates a dummy root node that all topic trees hang off of.
"""
self.root = self.Node(None)
def add_topic_tree(self, topic_tree, pattern):
"""
Splits a string topic_tree into tokens (by .) and recursively adds them to the trie.
Adds the pattern at the terminal node for later retrieval.
"""
topics = topic_tree.split(".")
curnode = self.root
for topic in topics:
curnode = curnode.get_or_create_child(topic)
if not pattern in curnode.patterns:
curnode.patterns.append(pattern)
def remove_topic_tree(self, topic_tree, pattern):
"""
Splits a string topic_tree into tokens (by .) and removes the pattern from the terminal node.
@TODO should remove empty nodes
"""
topics = topic_tree.split(".")
curnode = self.root
for topic in topics:
curnode = curnode.get_or_create_child(topic)
if pattern in curnode.patterns:
curnode.patterns.remove(pattern)
def get_all_matches(self, topic_tree):
"""
Returns a list of all matches for a given topic tree string.
Creates a set out of the matching patterns, so multiple binds matching on the same pattern only
return once.
"""
topics = topic_tree.split(".")
return set(self.root.get_all_matches(topics))
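# Editor's usage sketch (illustrative): AMQP-style matching with TopicTrie -- '*' matches
# exactly one dotted token, '#' matches any remaining tokens. The stored "patterns" here
# are plain queue-name strings, mirroring how LocalRouter below uses the trie.
def _example_topictrie_usage():
    tt = TopicTrie()
    tt.add_topic_tree('events.*.started', 'q_started')
    tt.add_topic_tree('events.#', 'q_all_events')
    assert tt.get_all_matches('events.proc1.started') == set(['q_started', 'q_all_events'])
    assert tt.get_all_matches('events.proc1.stopped.now') == set(['q_all_events'])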
class LocalRouter(object):
"""
A RabbitMQ-like routing device implemented with gevent mechanisms for an in-memory broker.
Using LocalTransport, can handle topic-exchange-like communication in ION within the context
of a single container.
"""
class ConsumerClosedMessage(object):
"""
        Sentinel object put on a queue to tell its consuming greenlet to exit its get() loop.
"""
pass
def __init__(self, sysname):
self._sysname = sysname
self.ready = Event()
# exchange/queues/bindings
self._exchanges = {} # names -> { subscriber, topictrie(queue name) }
self._queues = {} # names -> gevent queue
self._bindings_by_queue = defaultdict(list) # queue name -> [(ex, binding)]
self._lock_declarables = coros.RLock() # exchanges, queues, bindings, routing method
# consumers
self._consumers = defaultdict(list) # queue name -> [ctag, channel._on_deliver]
self._consumers_by_ctag = {} # ctag -> queue_name ??
self._ctag_pool = IDPool() # pool of consumer tags
self._lock_consumers = coros.RLock() # lock for interacting with any consumer related attrs
# deliveries
self._unacked = {} # dtag -> (ctag, msg)
self._lock_unacked = coros.RLock() # lock for interacting with unacked field
self._gl_msgs = None
self._gl_pool = Pool()
self.gl_ioloop = None
self.errors = []
@property
def _connect_addr(self):
return "inproc://%s" % self._sysname
def start(self):
"""
Starts all internal greenlets of this router device.
"""
self._queue_incoming = Queue()
self._gl_msgs = self._gl_pool.spawn(self._run_gl_msgs)
self._gl_msgs._glname = "pyon.net AMQP msgs"
self._gl_msgs.link_exception(self._child_failed)
self.gl_ioloop = spawn(self._run_ioloop)
self.gl_ioloop._glname = "pyon.net AMQP ioloop"
def stop(self):
self._gl_msgs.kill() # @TODO: better
self._gl_pool.join(timeout=5, raise_error=True)
def _run_gl_msgs(self):
self.ready.set()
while True:
ex, rkey, body, props = self._queue_incoming.get()
try:
with self._lock_declarables:
self._route(ex, rkey, body, props)
except Exception as e:
self.errors.append(e)
log.exception("Routing message")
def _route(self, exchange, routing_key, body, props):
"""
Delivers incoming messages into queues based on known routes.
This entire method runs in a lock (likely pretty slow).
"""
assert exchange in self._exchanges, "Unknown exchange %s" % exchange
queues = self._exchanges[exchange].get_all_matches(routing_key)
log.debug("route: ex %s, rkey %s, matched %s routes", exchange, routing_key, len(queues))
# deliver to each queue
for q in queues:
assert q in self._queues
log.debug("deliver -> %s", q)
self._queues[q].put((exchange, routing_key, body, props))
def _child_failed(self, gproc):
"""
Handler method for when any child worker thread dies with error.
Aborts the "ioloop" greenlet.
"""
log.error("Child (%s) failed with an exception: %s", gproc, gproc.exception)
if self.gl_ioloop:
self.gl_ioloop.kill(exception=gproc.exception, block=False)
def _run_ioloop(self):
"""
An "IOLoop"-like greenlet - sits and waits until the pool is finished.
Fits with the AMQP node.
"""
self._gl_pool.join()
def publish(self, exchange, routing_key, body, properties, immediate=False, mandatory=False):
self._queue_incoming.put((exchange, routing_key, body, properties))
sleep(0.0001) # really wish switch would work instead of a sleep, seems wrong
def declare_exchange(self, exchange, **kwargs):
with self._lock_declarables:
if not exchange in self._exchanges:
self._exchanges[exchange] = TopicTrie()
def delete_exchange(self, exchange, **kwargs):
with self._lock_declarables:
if exchange in self._exchanges:
del self._exchanges[exchange]
def declare_queue(self, queue, **kwargs):
with self._lock_declarables:
# come up with new queue name if none specified
if queue is None or queue == '':
while True:
proposed = "q-%s" % str(uuid4())[0:10]
if proposed not in self._queues:
queue = proposed
break
if not queue in self._queues:
self._queues[queue] = Queue()
return queue
def delete_queue(self, queue, **kwargs):
with self._lock_declarables:
if queue in self._queues:
del self._queues[queue]
# kill bindings
for ex, binding in self._bindings_by_queue[queue]:
if ex in self._exchanges:
self._exchanges[ex].remove_topic_tree(binding, queue)
self._bindings_by_queue.pop(queue)
def bind(self, exchange, queue, binding):
log.info("Bind: ex %s, q %s, b %s", exchange, queue, binding)
with self._lock_declarables:
assert exchange in self._exchanges, "Missing exchange %s in list of exchanges" % str(exchange)
assert queue in self._queues
tt = self._exchanges[exchange]
tt.add_topic_tree(binding, queue)
self._bindings_by_queue[queue].append((exchange, binding))
def unbind(self, exchange, queue, binding):
with self._lock_declarables:
assert exchange in self._exchanges
assert queue in self._queues
self._exchanges[exchange].remove_topic_tree(binding, queue)
for i, val in enumerate(self._bindings_by_queue[queue]):
ex, b = val
if ex == exchange and b == binding:
self._bindings_by_queue[queue].pop(i)
break
def start_consume(self, callback, queue, no_ack=False, exclusive=False):
assert queue in self._queues
with self._lock_consumers:
new_ctag = self._generate_ctag()
assert new_ctag not in self._consumers_by_ctag
with self._lock_declarables:
gl = self._gl_pool.spawn(self._run_consumer, new_ctag, queue, self._queues[queue], callback)
gl.link_exception(self._child_failed)
self._consumers[queue].append((new_ctag, callback, no_ack, exclusive, gl))
self._consumers_by_ctag[new_ctag] = queue
return new_ctag
def stop_consume(self, consumer_tag):
assert consumer_tag in self._consumers_by_ctag
with self._lock_consumers:
queue = self._consumers_by_ctag[consumer_tag]
self._consumers_by_ctag.pop(consumer_tag)
for i, consumer in enumerate(self._consumers[queue]):
if consumer[0] == consumer_tag:
# notify consumer greenlet that we want to stop
if queue in self._queues:
self._queues[queue].put(self.ConsumerClosedMessage())
consumer[4].join(timeout=5)
consumer[4].kill()
# @TODO reject any unacked messages
self._consumers[queue].pop(i)
break
self._return_ctag(consumer_tag)
def _run_consumer(self, ctag, queue_name, gqueue, callback):
cnt = 0
while True:
m = gqueue.get()
if isinstance(m, self.ConsumerClosedMessage):
break
exchange, routing_key, body, props = m
# create method frame
method_frame = DotDict()
method_frame['consumer_tag'] = ctag
method_frame['redelivered'] = False # @TODO
method_frame['exchange'] = exchange
method_frame['routing_key'] = routing_key
# create header frame
header_frame = DotDict()
header_frame['headers'] = props.copy()
# make delivery tag for ack/reject later
dtag = self._generate_dtag(ctag, cnt)
cnt += 1
with self._lock_unacked:
self._unacked[dtag] = (ctag, queue_name, m)
method_frame['delivery_tag'] = dtag
# deliver to callback
try:
callback(self, method_frame, header_frame, body)
except Exception:
log.exception("delivering to consumer, ignore!")
def _generate_ctag(self):
return "zctag-%s" % self._ctag_pool.get_id()
def _return_ctag(self, ctag):
self._ctag_pool.release_id(int(ctag.split("-")[-1]))
def _generate_dtag(self, ctag, cnt):
"""
Generates a unique delivery tag for each consumer.
Greenlet-safe, no need to lock.
"""
return "%s-%s" % (ctag, cnt)
def ack(self, delivery_tag):
assert delivery_tag in self._unacked
with self._lock_unacked:
del self._unacked[delivery_tag]
def reject(self, delivery_tag, requeue=False):
assert delivery_tag in self._unacked
with self._lock_unacked:
_, queue, m = self._unacked.pop(delivery_tag)
if requeue:
log.warn("REQUEUE: EXPERIMENTAL %s", delivery_tag)
self._queues[queue].put(m)
def transport_close(self, transport):
log.warn("LocalRouter.transport_close: %s TODO", transport)
# @TODO reject all messages in unacked spot
# turn off any consumers from this transport
def get_stats(self, queue):
"""
Returns a 2-tuple of (# msgs, # consumers) on a given queue.
"""
assert queue in self._queues
consumers = 0
if queue in self._consumers:
consumers = len(self._consumers[queue])
        # Queue.qsize() gives the number of undelivered messages, which appears to match what AMQP reports as well
return (self._queues[queue].qsize(), consumers)
def purge(self, queue):
"""
Deletes all contents of a queue.
@TODO could end up in a race with an infinite producer
"""
assert queue in self._queues
with Timeout(5):
while not self._queues[queue].empty():
self._queues[queue].get_nowait()
class LocalTransport(BaseTransport):
def __init__(self, broker, ch_number):
self._broker = broker
self._ch_number = ch_number
self._active = True
self._close_callbacks = []
def declare_exchange_impl(self, exchange, **kwargs):
self._broker.declare_exchange(exchange, **kwargs)
def delete_exchange_impl(self, exchange, **kwargs):
self._broker.delete_exchange(exchange, **kwargs)
def declare_queue_impl(self, queue, **kwargs):
return self._broker.declare_queue(queue, **kwargs)
def delete_queue_impl(self, queue, **kwargs):
self._broker.delete_queue(queue, **kwargs)
def bind_impl(self, exchange, queue, binding):
self._broker.bind(exchange, queue, binding)
def unbind_impl(self, exchange, queue, binding):
self._broker.unbind(exchange, queue, binding)
def publish_impl(self, exchange, routing_key, body, properties, immediate=False, mandatory=False, durable_msg=False):
self._broker.publish(exchange, routing_key, body, properties, immediate=immediate, mandatory=mandatory)
def start_consume_impl(self, callback, queue, no_ack=False, exclusive=False):
return self._broker.start_consume(callback, queue, no_ack=no_ack, exclusive=exclusive)
def stop_consume_impl(self, consumer_tag):
self._broker.stop_consume(consumer_tag)
def ack_impl(self, delivery_tag):
self._broker.ack(delivery_tag)
def reject_impl(self, delivery_tag, requeue=False):
self._broker.reject(delivery_tag, requeue=requeue)
def close(self):
self._broker.transport_close(self)
self._active = False
for cb in self._close_callbacks:
cb(self, 200, "Closed ok") # @TODO should come elsewhere
def add_on_close_callback(self, cb):
self._close_callbacks.append(cb)
@property
def active(self):
return self._active
@property
def channel_number(self):
return self._ch_number
def qos_impl(self, prefetch_size=0, prefetch_count=0, global_=False):
log.info("TODO: QOS")
def get_stats_impl(self, queue):
return self._broker.get_stats(queue)
def purge_impl(self, queue):
return self._broker.purge(queue)
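# Editor's sketch (illustrative): LocalTransport is a thin pass-through to its broker, and
# close() both notifies the broker and fires any registered close callbacks. '_DummyBroker'
# is a hypothetical stand-in used only for this sketch, not part of pyon.
def _example_local_transport_close():
    class _DummyBroker(object):
        def transport_close(self, transport):
            self.closed_transport = transport
    broker = _DummyBroker()
    lt = LocalTransport(broker, 7)
    seen = []
    lt.add_on_close_callback(lambda transport, code, text: seen.append((code, text)))
    lt.close()
    assert lt.active is False and lt.channel_number == 7
    assert seen == [(200, "Closed ok")]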
|
|
from __future__ import print_function, absolute_import, division
import itertools
import numpy as np
from numba import utils
from numba import unittest_support as unittest
from .support import TestCase, skip_on_numpy_16
DBL_EPSILON = 2**-52
FLT_EPSILON = 2**-23
INF = float('inf')
NAN = float('nan')
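# Editor's note (illustrative sketch): DBL_EPSILON and FLT_EPSILON above are the machine
# epsilons, i.e. the spacing from 1.0 to the next representable double/single value; the
# ulps-based comparisons below are expressed in multiples of these.
def _epsilon_sanity_sketch():
    assert 1.0 + DBL_EPSILON != 1.0
    assert 1.0 + DBL_EPSILON / 2 == 1.0                              # rounds back to 1.0
    assert np.float32(1.0) + np.float32(FLT_EPSILON) != np.float32(1.0)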
class TestAssertPreciseEqual(TestCase):
"""
Tests for TestCase.assertPreciseEqual().
"""
int_types = [int]
if utils.PYVERSION < (3,):
int_types.append(long)
np_float_types = [np.float32, np.float64]
float_types = [float] + np_float_types
np_complex_types = [np.complex64, np.complex128]
complex_types = [complex] + np_complex_types
def eq(self, left, right, **kwargs):
def assert_succeed(left, right):
self.assertPreciseEqual(left, right, **kwargs)
self.assertPreciseEqual(right, left, **kwargs)
assert_succeed(left, right)
assert_succeed((left, left), (right, right))
assert_succeed([left, left], [right, right])
def ne(self, left, right, **kwargs):
def assert_fail(left, right):
try:
self.assertPreciseEqual(left, right, **kwargs)
except AssertionError:
pass
else:
self.fail("%s and %s unexpectedly considered equal" % (left, right))
assert_fail(left, right)
assert_fail(right, left)
assert_fail((left, left), (right, right))
assert_fail((right, right), (left, left))
assert_fail([left, left], [right, right])
assert_fail([right, right], [left, left])
def test_types(self):
# assertPreciseEqual() should test for type compatibility
# int-like, float-like, complex-like are not compatible
for i, f, c in itertools.product(self.int_types, self.float_types,
self.complex_types):
self.ne(i(1), f(1))
self.ne(f(1), c(1))
self.ne(i(1), c(1))
# int and long are compatible between each other
for u, v in itertools.product(self.int_types, self.int_types):
self.eq(u(1), v(1))
# NumPy float types are not compatible between each other
for u, v in itertools.product(self.np_float_types, self.np_float_types):
if u is v:
self.eq(u(1), v(1))
else:
self.ne(u(1), v(1))
# NumPy complex types are not compatible between each other
for u, v in itertools.product(self.np_complex_types, self.np_complex_types):
if u is v:
self.eq(u(1), v(1))
else:
self.ne(u(1), v(1))
def test_int_values(self):
for tp in self.int_types:
for prec in ['exact', 'single', 'double']:
self.eq(tp(0), tp(0), prec=prec)
self.ne(tp(0), tp(1), prec=prec)
self.ne(tp(-1), tp(1), prec=prec)
self.ne(tp(2**80), tp(1+2**80), prec=prec)
def test_abs_tol_parse(self):
        # check that invalid values for the abs_tol kwarg raise
with self.assertRaises(ValueError):
self.eq(np.float64(1e-17), np.float64(1e-17), abs_tol="invalid")
with self.assertRaises(ValueError):
self.eq(np.float64(1), np.float64(2), abs_tol=int(7))
def test_float_values(self):
for tp in self.float_types:
for prec in ['exact', 'single', 'double']:
self.eq(tp(1.5), tp(1.5), prec=prec)
# Signed zeros
self.eq(tp(0.0), tp(0.0), prec=prec)
self.eq(tp(-0.0), tp(-0.0), prec=prec)
self.ne(tp(0.0), tp(-0.0), prec=prec)
self.eq(tp(0.0), tp(-0.0), prec=prec, ignore_sign_on_zero=True)
# Infinities
self.eq(tp(INF), tp(INF), prec=prec)
self.ne(tp(INF), tp(1e38), prec=prec)
self.eq(tp(-INF), tp(-INF), prec=prec)
self.ne(tp(INF), tp(-INF), prec=prec)
# NaNs
self.eq(tp(NAN), tp(NAN), prec=prec)
self.ne(tp(NAN), tp(0), prec=prec)
self.ne(tp(NAN), tp(INF), prec=prec)
self.ne(tp(NAN), tp(-INF), prec=prec)
def test_float64_values(self):
for tp in [float, np.float64]:
self.ne(tp(1.0 + DBL_EPSILON), tp(1.0))
def test_float32_values(self):
tp = np.float32
self.ne(tp(1.0 + FLT_EPSILON), tp(1.0))
def test_float64_values_inexact(self):
for tp in [float, np.float64]:
for scale in [1.0, -2**3, 2**-4, -2**-20]:
a = scale * 1.0
b = scale * (1.0 + DBL_EPSILON)
c = scale * (1.0 + DBL_EPSILON * 2)
d = scale * (1.0 + DBL_EPSILON * 4)
self.ne(tp(a), tp(b))
self.ne(tp(a), tp(b), prec='exact')
self.eq(tp(a), tp(b), prec='double')
self.eq(tp(a), tp(b), prec='double', ulps=1)
self.ne(tp(a), tp(c), prec='double')
self.eq(tp(a), tp(c), prec='double', ulps=2)
self.ne(tp(a), tp(d), prec='double', ulps=2)
self.eq(tp(a), tp(c), prec='double', ulps=3)
self.eq(tp(a), tp(d), prec='double', ulps=3)
# test absolute tolerance based on eps
self.eq(tp(1e-16), tp(3e-16), prec='double', abs_tol="eps")
self.ne(tp(1e-16), tp(4e-16), prec='double', abs_tol="eps")
# test absolute tolerance based on value
self.eq(tp(1e-17), tp(1e-18), prec='double', abs_tol=1e-17)
self.ne(tp(1e-17), tp(3e-17), prec='double', abs_tol=1e-17)
def test_float32_values_inexact(self):
tp = np.float32
for scale in [1.0, -2**3, 2**-4, -2**-20]:
# About the choice of 0.9: there seem to be issues when
# converting
a = scale * 1.0
b = scale * (1.0 + FLT_EPSILON)
c = scale * (1.0 + FLT_EPSILON * 2)
d = scale * (1.0 + FLT_EPSILON * 4)
self.ne(tp(a), tp(b))
self.ne(tp(a), tp(b), prec='exact')
self.ne(tp(a), tp(b), prec='double')
self.eq(tp(a), tp(b), prec='single')
self.ne(tp(a), tp(c), prec='single')
self.eq(tp(a), tp(c), prec='single', ulps=2)
self.ne(tp(a), tp(d), prec='single', ulps=2)
self.eq(tp(a), tp(c), prec='single', ulps=3)
self.eq(tp(a), tp(d), prec='single', ulps=3)
# test absolute tolerance based on eps
self.eq(tp(1e-7), tp(2e-7), prec='single', abs_tol="eps")
self.ne(tp(1e-7), tp(3e-7), prec='single', abs_tol="eps")
# test absolute tolerance based on value
self.eq(tp(1e-7), tp(1e-8), prec='single', abs_tol=1e-7)
self.ne(tp(1e-7), tp(3e-7), prec='single', abs_tol=1e-7)
def test_complex_values(self):
# Complex literals with signed zeros are confusing, better use
# the explicit constructor.
c_pp, c_pn, c_np, c_nn = [complex(0.0, 0.0), complex(0.0, -0.0),
complex(-0.0, 0.0), complex(-0.0, -0.0)]
for tp in self.complex_types:
for prec in ['exact', 'single', 'double']:
self.eq(tp(1 + 2j), tp(1 + 2j), prec=prec)
self.ne(tp(1 + 1j), tp(1 + 2j), prec=prec)
self.ne(tp(2 + 2j), tp(1 + 2j), prec=prec)
# Signed zeros
self.eq(tp(c_pp), tp(c_pp), prec=prec)
self.eq(tp(c_np), tp(c_np), prec=prec)
self.eq(tp(c_nn), tp(c_nn), prec=prec)
self.ne(tp(c_pp), tp(c_pn), prec=prec)
self.ne(tp(c_pn), tp(c_nn), prec=prec)
# Infinities
self.eq(tp(complex(INF, INF)), tp(complex(INF, INF)), prec=prec)
self.eq(tp(complex(INF, -INF)), tp(complex(INF, -INF)), prec=prec)
self.eq(tp(complex(-INF, -INF)), tp(complex(-INF, -INF)), prec=prec)
self.ne(tp(complex(INF, INF)), tp(complex(INF, -INF)), prec=prec)
self.ne(tp(complex(INF, INF)), tp(complex(-INF, INF)), prec=prec)
self.eq(tp(complex(INF, 0)), tp(complex(INF, 0)), prec=prec)
# NaNs
self.eq(tp(complex(NAN, 0)), tp(complex(NAN, 0)), prec=prec)
self.eq(tp(complex(0, NAN)), tp(complex(0, NAN)), prec=prec)
self.eq(tp(complex(NAN, NAN)), tp(complex(NAN, NAN)), prec=prec)
self.eq(tp(complex(INF, NAN)), tp(complex(INF, NAN)), prec=prec)
self.eq(tp(complex(NAN, -INF)), tp(complex(NAN, -INF)), prec=prec)
# FIXME
#self.ne(tp(complex(NAN, INF)), tp(complex(NAN, -INF)))
#self.ne(tp(complex(NAN, 0)), tp(complex(NAN, 1)))
#self.ne(tp(complex(INF, NAN)), tp(complex(-INF, NAN)))
#self.ne(tp(complex(0, NAN)), tp(complex(1, NAN)))
#self.ne(tp(complex(NAN, 0)), tp(complex(0, NAN)))
# XXX should work with other precisions as well?
self.ne(tp(complex(INF, 0)), tp(complex(INF, 1)), prec='exact')
def test_complex128_values_inexact(self):
for tp in [complex, np.complex128]:
for scale in [1.0, -2**3, 2**-4, -2**-20]:
a = scale * 1.0
b = scale * (1.0 + DBL_EPSILON)
c = scale * (1.0 + DBL_EPSILON * 2)
aa = tp(complex(a, a))
ab = tp(complex(a, b))
bb = tp(complex(b, b))
self.ne(tp(aa), tp(ab))
self.eq(tp(aa), tp(ab), prec='double')
self.eq(tp(ab), tp(bb), prec='double')
self.eq(tp(aa), tp(bb), prec='double')
ac = tp(complex(a, c))
cc = tp(complex(c, c))
self.ne(tp(aa), tp(ac), prec='double')
self.ne(tp(ac), tp(cc), prec='double')
self.eq(tp(aa), tp(ac), prec='double', ulps=2)
self.eq(tp(ac), tp(cc), prec='double', ulps=2)
self.eq(tp(aa), tp(cc), prec='double', ulps=2)
self.eq(tp(aa), tp(cc), prec='single')
def test_complex64_values_inexact(self):
tp = np.complex64
for scale in [1.0, -2**3, 2**-4, -2**-20]:
a = scale * 1.0
b = scale * (1.0 + FLT_EPSILON)
c = scale * (1.0 + FLT_EPSILON * 2)
aa = tp(complex(a, a))
ab = tp(complex(a, b))
bb = tp(complex(b, b))
self.ne(tp(aa), tp(ab))
self.ne(tp(aa), tp(ab), prec='double')
self.eq(tp(aa), tp(ab), prec='single')
self.eq(tp(ab), tp(bb), prec='single')
self.eq(tp(aa), tp(bb), prec='single')
ac = tp(complex(a, c))
cc = tp(complex(c, c))
self.ne(tp(aa), tp(ac), prec='single')
self.ne(tp(ac), tp(cc), prec='single')
self.eq(tp(aa), tp(ac), prec='single', ulps=2)
self.eq(tp(ac), tp(cc), prec='single', ulps=2)
self.eq(tp(aa), tp(cc), prec='single', ulps=2)
def test_arrays(self):
a = np.arange(1, 7, dtype=np.int16).reshape((2, 3))
b = a.copy()
self.eq(a, b)
# Different values
self.ne(a, b + 1)
self.ne(a, b[:-1])
self.ne(a, b.T)
# Different dtypes
self.ne(a, b.astype(np.int32))
# Different layout
self.ne(a, b.T.copy().T)
# Different ndim
self.ne(a, b.flatten())
# Different writeability
b.flags.writeable = False
self.ne(a, b)
# Precision
a = np.arange(1, 3, dtype=np.float64)
b = a * (1.0 + DBL_EPSILON)
c = a * (1.0 + DBL_EPSILON * 2)
self.ne(a, b)
self.eq(a, b, prec='double')
self.ne(a, c, prec='double')
@skip_on_numpy_16
def test_npdatetime(self):
a = np.datetime64('1900', 'Y')
b = np.datetime64('1900', 'Y')
c = np.datetime64('1900-01-01', 'D')
d = np.datetime64('1901', 'Y')
self.eq(a, b)
# Different unit
self.ne(a, c)
# Different value
self.ne(a, d)
@skip_on_numpy_16
def test_nptimedelta(self):
a = np.timedelta64(1, 'h')
b = np.timedelta64(1, 'h')
c = np.timedelta64(60, 'm')
d = np.timedelta64(2, 'h')
self.eq(a, b)
# Different unit
self.ne(a, c)
# Different value
self.ne(a, d)
class TestMisc(TestCase):
def test_assertRefCount(self):
# Use floats to avoid integer interning
x = 55.
y = 66.
l = []
with self.assertRefCount(x, y):
pass
with self.assertRaises(AssertionError) as cm:
# y gains a reference
with self.assertRefCount(x, y):
l.append(y)
self.assertIn("66", str(cm.exception))
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import unicode_literals
import pytest
from dtypes.binary_heap import BinaryHeap
# (in, expected) for constructors
valid_constructor_args = [
["qzdfwqefnsadnfoiaweod", "a"],
[[6, 7, 9, 4, 2, 1, 56, 8, 0, 43523], 0],
[[1243.12235, 13262.3, 26523.15, 98653.234], 1243.12235]
]
valid_constructor_args_max = [
["qzdfwqefnsadnfoiaweod", "z"],
[[6, 7, 9, 4, 2, 1, 56, 8, 0, 43523], 43523],
[[1243.12235, 13262.3, 26523.15, 98653.234], 98653.234]
]
pop_constructors = [
[5, 8, 98, 43, 21, 1, 3, 7, 0, 3, 4, 7, 2345, 4, 64],
[33, 5314, 124, 243, 234, 1324, 2, 342, 1243, 134],
]
invalid_constructors = [
8,
None,
4.523423
]
def is_minheap_sorted(heap):
"""Confirm that heap is minheap sorted.
Original idea from:
https://github.com/MigrantJ/data-structures/blob/binheap/binheap/binheap.py
"""
for i in range(len(heap)):
try:
if heap[i] > heap[(2*i + 1)]:
return False
if heap[i] > heap[(2*i) + 2]:
return False
        except IndexError:
            return True
    # an empty heap never enters the loop; treat it as sorted
    return True
def is_maxheap_sorted(heap):
"""Confirm that heap is maxheap sorted."""
for i in range(len(heap)):
try:
if heap[i] < heap[(2*i + 1)]:
return False
if heap[i] < heap[(2*i) + 2]:
return False
        except IndexError:
            return True
    return True
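# Editor's sketch (illustrative): both checkers rely on the standard array layout of a
# binary heap, where the children of index i live at 2*i + 1 and 2*i + 2; a plain list
# laid out that way satisfies them the same way a BinaryHeap instance does.
def _example_heap_layout():
    tree = [0, 1, 2, 3, 4, 5, 6]
    assert (tree[2 * 1 + 1], tree[2 * 1 + 2]) == (3, 4)   # children of index 1
    assert is_minheap_sorted(tree)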
@pytest.fixture()
def minheap_empty():
return BinaryHeap()
def test_find_parent():
minheap = BinaryHeap([0, 1, 2, 3, 4, 5, 6])
assert minheap._find_parent(2) == 0
assert minheap._find_parent(6) == 2
def test_find_children():
minheap = BinaryHeap([0, 1, 2, 3, 4, 5, 6])
assert minheap._find_children(0) == (1, 2)
assert minheap._find_children(2) == (5, 6)
def test_is_unsorted_minheap_comparison():
minheap = BinaryHeap(minheap=True)
assert minheap._is_unsorted(1, 2)
def test_is_unsorted_maxheap_comparison():
minheap = BinaryHeap(minheap=False)
assert minheap._is_unsorted(2, 1)
def test_swap():
minheap = BinaryHeap([0, 1, 2, 3, 4, 5, 6])
minheap._swap(0, 6)
assert minheap.tree[0] == 6
assert minheap.tree[6] == 0
def test_bubbledown_minheap():
minheap = BinaryHeap([0, 1, 2, 3, 4, 5, 6])
minheap[0] = 4000
minheap._bubbledown(0)
assert minheap[0] == 1
assert is_minheap_sorted(minheap)
def test_bubbledown_maxheap():
maxheap = BinaryHeap([6, 5, 4, 3, 2, 1, 0], minheap=False)
maxheap[6] = 4000
maxheap._bubbleup(6)
assert maxheap[0] == 4000
assert is_maxheap_sorted(maxheap)
@pytest.mark.parametrize("input, output", valid_constructor_args)
def test_valid_instantiation_min(input, output):
"""Test instantiation by creating and doing one pop"""
heap_under_test = BinaryHeap(input)
assert is_minheap_sorted(heap_under_test)
assert heap_under_test.pop() == output
@pytest.mark.parametrize("input, output", valid_constructor_args_max)
def test_valid_instantiation_max(input, output):
"""Test instantiation by creating and doing one pop"""
heap_under_test = BinaryHeap(input, minheap=False)
assert is_maxheap_sorted(heap_under_test)
assert heap_under_test.pop() == output
@pytest.mark.parametrize("bad_input", invalid_constructors)
def test_invalid_instantiation(bad_input):
"""Test that bad by creating and doing one pop"""
with pytest.raises(TypeError):
BinaryHeap(bad_input)
def test_push1(minheap_empty):
""" First push single item from list of [9, 5, 2, 1, 0, 7] """
minheap_empty.push(9)
assert minheap_empty.pop() == 9
def test_push2(minheap_empty):
""" First push two items from list of [9, 5, 2, 1, 0, 7]; current
test for min heap """
minheap_empty.push(5)
minheap_empty.push(9)
assert minheap_empty.pop() == 5
def test_push3(minheap_empty):
""" First push three items from list of [9, 5, 2, 1, 0, 7]; current
test for min heap """
minheap_empty.push(5)
minheap_empty.push(9)
minheap_empty.push(2)
assert minheap_empty.pop() == 2
def test_push4(minheap_empty):
""" First push four items from list of [9, 5, 2, 1, 0, 7]; current
test for min heap """
minheap_empty.push(5)
minheap_empty.push(9)
minheap_empty.push(2)
minheap_empty.push(1)
assert minheap_empty.pop() == 1
def test_push5(minheap_empty):
""" First push five items from list of [9, 5, 2, 1, 0, 7]; current
test for min heap """
minheap_empty.push(5)
minheap_empty.push(9)
minheap_empty.push(2)
minheap_empty.push(1)
minheap_empty.push(0)
assert minheap_empty.pop() == 0
def test_push6(minheap_empty):
""" First push six items from list of [9, 5, 2, 1, 0, 7]; current
test for min heap """
minheap_empty.push(5)
minheap_empty.push(9)
minheap_empty.push(2)
minheap_empty.push(1)
minheap_empty.push(0)
minheap_empty.push(7)
assert minheap_empty.pop() == 0
def test_pop_minheap():
minheap = BinaryHeap([7, 9, 18, 1, 38, 5.4, 6])
minheap.push(0)
length = len(minheap)
assert minheap.pop() == 0
assert len(minheap) == length - 1
def test_pop_maxheap():
maxheap = BinaryHeap([7, 9, 18, 1, 38, 5.4, 6], minheap=False)
maxheap.push(400)
length = len(maxheap)
assert maxheap.pop() == 400
assert len(maxheap) == length - 1
def test_multipop_minheap():
minheap = BinaryHeap([7, 9, 18, 1, 38, 5.4, 6, 200])
length = len(minheap)
minheap.pop()
minheap.pop()
minheap.push(0)
minheap.pop()
minheap.pop()
assert minheap.pop() == 7
assert len(minheap) == length - 4
def test_multipop_maxheap():
maxheap = BinaryHeap([7, 9, 18, 1, 38, 5.4, 6, 200], minheap=False)
length = len(maxheap)
maxheap.pop()
maxheap.pop()
maxheap.push(400)
maxheap.pop()
maxheap.pop()
assert maxheap.pop() == 9
assert len(maxheap) == length - 4
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
from threading import Lock
from math import ceil
from azure.storage.common._common_conversion import _encode_base64
from azure.storage.common._error import _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
from azure.storage.common._serialization import (
url_quote,
_get_data_bytes_only,
_len_plus
)
from ._deserialization import _parse_base_properties
from ._constants import (
_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
)
from ._encryption import (
_get_blob_encryptor_and_padder,
)
from .models import BlobBlock
def _upload_blob_chunks(blob_service, container_name, blob_name,
blob_size, block_size, stream, max_connections,
progress_callback, validate_content, lease_id, uploader_class,
maxsize_condition=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None, cpk=None,
content_encryption_key=None, initialization_vector=None, resource_properties=None):
encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector,
uploader_class is not _PageBlobChunkUploader)
uploader = uploader_class(
blob_service,
container_name,
blob_name,
blob_size,
block_size,
stream,
max_connections > 1,
progress_callback,
validate_content,
lease_id,
timeout,
encryptor,
padder,
cpk,
)
uploader.maxsize_condition = maxsize_condition
# Access conditions do not work with parallelism
if max_connections > 1:
uploader.if_match = uploader.if_none_match = uploader.if_modified_since = uploader.if_unmodified_since = None
else:
uploader.if_match = if_match
uploader.if_none_match = if_none_match
uploader.if_modified_since = if_modified_since
uploader.if_unmodified_since = if_unmodified_since
if progress_callback is not None:
progress_callback(0, blob_size)
if max_connections > 1:
import concurrent.futures
from threading import BoundedSemaphore
        '''
        Ensures we bound the chunking so we only buffer and submit 'max_connections' amount of work items to the executor.
        This is necessary as the executor queue would otherwise keep accepting submitted work items, buffering all of the
        blocks in memory at once. Using max_connections + 1 ensures the next chunk is already buffered and ready for when
        a worker thread becomes available.
        '''
chunk_throttler = BoundedSemaphore(max_connections + 1)
executor = concurrent.futures.ThreadPoolExecutor(max_connections)
futures = []
running_futures = []
# Check for exceptions and fail fast.
for chunk in uploader.get_chunk_streams():
for f in running_futures:
if f.done():
if f.exception():
raise f.exception()
else:
running_futures.remove(f)
chunk_throttler.acquire()
future = executor.submit(uploader.process_chunk, chunk)
# Calls callback upon completion (even if the callback was added after the Future task is done).
future.add_done_callback(lambda x: chunk_throttler.release())
futures.append(future)
running_futures.append(future)
# result() will wait until completion and also raise any exceptions that may have been set.
range_ids = [f.result() for f in futures]
else:
range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
if resource_properties and uploader.response_properties is not None:
resource_properties.clone(uploader.response_properties)
return range_ids
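# Editor's sketch (illustrative, standalone -- not an Azure API): the bounded-semaphore
# throttling pattern used above, reduced to its core. At most max_workers + 1 unfinished
# submissions are outstanding at a time; every finished future releases one permit through
# its done-callback. The names here (_example_throttled_submit, handler, work_items) are
# hypothetical and exist only for this sketch.
def _example_throttled_submit(work_items, handler, max_workers=4):
    import concurrent.futures
    from threading import BoundedSemaphore
    throttler = BoundedSemaphore(max_workers + 1)
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers) as executor:
        for item in work_items:
            throttler.acquire()                       # block while too many items are in flight
            future = executor.submit(handler, item)
            future.add_done_callback(lambda _f: throttler.release())
            futures.append(future)
        return [f.result() for f in futures]          # re-raises any worker exception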
def _upload_blob_substream_blocks(blob_service, container_name, blob_name,
blob_size, block_size, stream, max_connections,
progress_callback, validate_content, lease_id, uploader_class,
maxsize_condition=None, if_match=None, timeout=None, cpk=None):
uploader = uploader_class(
blob_service,
container_name,
blob_name,
blob_size,
block_size,
stream,
max_connections > 1,
progress_callback,
validate_content,
lease_id,
timeout,
None,
None,
cpk,
)
uploader.maxsize_condition = maxsize_condition
    # ETag matching does not work with parallelism, as a ranged upload may start
    # before the previous one finishes and provides an ETag
uploader.if_match = if_match if not max_connections > 1 else None
if progress_callback is not None:
progress_callback(0, blob_size)
if max_connections > 1:
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(max_connections)
range_ids = list(executor.map(uploader.process_substream_block, uploader.get_substream_blocks()))
else:
range_ids = [uploader.process_substream_block(result) for result in uploader.get_substream_blocks()]
return range_ids
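# Editor's sketch (illustrative): how get_substream_blocks() below sizes its blocks --
# every block is chunk_size bytes except possibly the last, which carries the remainder.
def _example_block_sizes(blob_length, chunk_size):
    blocks = int(ceil(blob_length / (chunk_size * 1.0)))
    last = chunk_size if blob_length % chunk_size == 0 else blob_length % chunk_size
    return [last if i == blocks - 1 else chunk_size for i in range(blocks)]
# e.g. _example_block_sizes(10, 4) == [4, 4, 2]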
class _BlobChunkUploader(object):
def __init__(self, blob_service, container_name, blob_name, blob_size,
chunk_size, stream, parallel, progress_callback,
validate_content, lease_id, timeout, encryptor, padder, cpk):
self.blob_service = blob_service
self.container_name = container_name
self.blob_name = blob_name
self.blob_size = blob_size
self.chunk_size = chunk_size
self.stream = stream
self.parallel = parallel
self.stream_start = stream.tell() if parallel else None
self.stream_lock = Lock() if parallel else None
self.progress_callback = progress_callback
self.progress_total = 0
self.progress_lock = Lock() if parallel else None
self.validate_content = validate_content
self.lease_id = lease_id
self.timeout = timeout
self.encryptor = encryptor
self.padder = padder
self.response_properties = None
self.cpk = cpk
def get_chunk_streams(self):
index = 0
while True:
data = b''
read_size = self.chunk_size
# Buffer until we either reach the end of the stream or get a whole chunk.
while True:
if self.blob_size:
read_size = min(self.chunk_size - len(data), self.blob_size - (index + len(data)))
temp = self.stream.read(read_size)
temp = _get_data_bytes_only('temp', temp)
data += temp
# We have read an empty string and so are at the end
# of the buffer or we have read a full chunk.
if temp == b'' or len(data) == self.chunk_size:
break
if len(data) == self.chunk_size:
if self.padder:
data = self.padder.update(data)
if self.encryptor:
data = self.encryptor.update(data)
yield index, data
else:
if self.padder:
data = self.padder.update(data) + self.padder.finalize()
if self.encryptor:
data = self.encryptor.update(data) + self.encryptor.finalize()
if len(data) > 0:
yield index, data
break
index += len(data)
def process_chunk(self, chunk_data):
chunk_bytes = chunk_data[1]
chunk_offset = chunk_data[0]
return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
def _update_progress(self, length):
if self.progress_callback is not None:
if self.progress_lock is not None:
with self.progress_lock:
self.progress_total += length
total = self.progress_total
else:
self.progress_total += length
total = self.progress_total
self.progress_callback(total, self.blob_size)
def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
range_id = self._upload_chunk(chunk_offset, chunk_data)
self._update_progress(len(chunk_data))
return range_id
def get_substream_blocks(self):
assert self.chunk_size is not None
lock = self.stream_lock
blob_length = self.blob_size
if blob_length is None:
blob_length = _len_plus(self.stream)
if blob_length is None:
raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('stream'))
blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
for i in range(blocks):
yield ('BlockId{}'.format("%05d" % i),
_SubStream(self.stream, i * self.chunk_size, last_block_size if i == blocks - 1 else self.chunk_size,
lock))
def process_substream_block(self, block_data):
return self._upload_substream_block_with_progress(block_data[0], block_data[1])
def _upload_substream_block_with_progress(self, block_id, block_stream):
range_id = self._upload_substream_block(block_id, block_stream)
self._update_progress(len(block_stream))
return range_id
def set_response_properties(self, resp):
self.response_properties = resp
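# Editor's sketch (illustrative): the chunking loop of get_chunk_streams() above reduced to
# its core -- keep reading until a full chunk is buffered or the stream is exhausted, then
# yield (byte_offset, chunk). The encryption/padding handled by the class is omitted here.
def _example_chunk_stream(stream, chunk_size):
    offset = 0
    while True:
        data = b''
        while len(data) < chunk_size:
            piece = stream.read(chunk_size - len(data))
            if not piece:                              # end of stream
                break
            data += piece
        if not data:
            break
        yield offset, data
        if len(data) < chunk_size:                     # short chunk means the stream ended
            break
        offset += len(data)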
class _BlockBlobChunkUploader(_BlobChunkUploader):
def _upload_chunk(self, chunk_offset, chunk_data):
block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
self.blob_service._put_block(
self.container_name,
self.blob_name,
chunk_data,
block_id,
validate_content=self.validate_content,
lease_id=self.lease_id,
timeout=self.timeout,
cpk=self.cpk,
)
return BlobBlock(block_id)
def _upload_substream_block(self, block_id, block_stream):
try:
self.blob_service._put_block(
self.container_name,
self.blob_name,
block_stream,
block_id,
validate_content=self.validate_content,
lease_id=self.lease_id,
timeout=self.timeout,
cpk=self.cpk,
)
finally:
block_stream.close()
return BlobBlock(block_id)
class _PageBlobChunkUploader(_BlobChunkUploader):
def _is_chunk_empty(self, chunk_data):
# read until non-zero byte is encountered
# if reached the end without returning, then chunk_data is all 0's
for each_byte in chunk_data:
if each_byte != 0 and each_byte != b'\x00':
return False
return True
def _upload_chunk(self, chunk_start, chunk_data):
# avoid uploading the empty pages
if not self._is_chunk_empty(chunk_data):
chunk_end = chunk_start + len(chunk_data) - 1
resp = self.blob_service._update_page(
self.container_name,
self.blob_name,
chunk_data,
chunk_start,
chunk_end,
validate_content=self.validate_content,
lease_id=self.lease_id,
if_match=self.if_match,
timeout=self.timeout,
cpk=self.cpk,
)
if not self.parallel:
self.if_match = resp.etag
self.set_response_properties(resp)
class _AppendBlobChunkUploader(_BlobChunkUploader):
def _upload_chunk(self, chunk_offset, chunk_data):
if not hasattr(self, 'current_length'):
resp = self.blob_service.append_block(
self.container_name,
self.blob_name,
chunk_data,
validate_content=self.validate_content,
lease_id=self.lease_id,
maxsize_condition=self.maxsize_condition,
timeout=self.timeout,
if_modified_since=self.if_modified_since,
if_unmodified_since=self.if_unmodified_since,
if_match=self.if_match,
if_none_match=self.if_none_match,
cpk=self.cpk,
)
self.current_length = resp.append_offset
else:
resp = self.blob_service.append_block(
self.container_name,
self.blob_name,
chunk_data,
validate_content=self.validate_content,
lease_id=self.lease_id,
maxsize_condition=self.maxsize_condition,
appendpos_condition=self.current_length + chunk_offset,
timeout=self.timeout,
cpk=self.cpk,
)
self.set_response_properties(resp)
class _SubStream(IOBase):
def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
        # Python 2.7: file-like objects created with open() typically support seek(), but are not
        # derived from io.IOBase and thus do not implement seekable().
        # Python 3: file-like objects created with open() are derived from io.IOBase.
        try:
            # only the main thread runs this, so there is no need to grab the lock
            wrapped_stream.seek(0, SEEK_CUR)
        except Exception:
            raise ValueError("Wrapped stream must support seek().")
self._lock = lockObj
self._wrapped_stream = wrapped_stream
self._position = 0
self._stream_begin_index = stream_begin_index
self._length = length
self._buffer = BytesIO()
# we must avoid buffering more than necessary, and also not use up too much memory
# so the max buffer size is capped at 4MB
self._max_buffer_size = length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE \
else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
self._current_buffer_start = 0
self._current_buffer_size = 0
def __len__(self):
return self._length
def close(self):
if self._buffer:
self._buffer.close()
self._wrapped_stream = None
IOBase.close(self)
def fileno(self):
return self._wrapped_stream.fileno()
def flush(self):
pass
def read(self, n):
if self.closed:
raise ValueError("Stream is closed.")
# adjust if out of bounds
if n + self._position >= self._length:
n = self._length - self._position
# return fast
if n == 0 or self._buffer.closed:
return b''
# attempt first read from the read buffer and update position
read_buffer = self._buffer.read(n)
bytes_read = len(read_buffer)
bytes_remaining = n - bytes_read
self._position += bytes_read
# repopulate the read buffer from the underlying stream to fulfill the request
# ensure the seek and read operations are done atomically (only if a lock is provided)
if bytes_remaining > 0:
with self._buffer:
# either read in the max buffer size specified on the class
# or read in just enough data for the current block/sub stream
current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
# lock is only defined if max_connections > 1 (parallel uploads)
if self._lock:
with self._lock:
# reposition the underlying stream to match the start of the data to read
absolute_position = self._stream_begin_index + self._position
self._wrapped_stream.seek(absolute_position, SEEK_SET)
# If we can't seek to the right location, our read will be corrupted so fail fast.
if self._wrapped_stream.tell() != absolute_position:
raise IOError("Stream failed to seek to the desired location.")
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
else:
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
if buffer_from_stream:
# update the buffer with new data from the wrapped stream
# we need to note down the start position and size of the buffer, in case seek is performed later
self._buffer = BytesIO(buffer_from_stream)
self._current_buffer_start = self._position
self._current_buffer_size = len(buffer_from_stream)
# read the remaining bytes from the new buffer and update position
second_read_buffer = self._buffer.read(bytes_remaining)
read_buffer += second_read_buffer
self._position += len(second_read_buffer)
return read_buffer
def readable(self):
return True
def readinto(self, b):
raise UnsupportedOperation
def seek(self, offset, whence=0):
if whence is SEEK_SET:
start_index = 0
elif whence is SEEK_CUR:
start_index = self._position
elif whence is SEEK_END:
start_index = self._length
offset = - offset
else:
raise ValueError("Invalid argument for the 'whence' parameter.")
pos = start_index + offset
if pos > self._length:
pos = self._length
elif pos < 0:
pos = 0
# check if buffer is still valid
# if not, drop buffer
if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
self._buffer.close()
self._buffer = BytesIO()
else: # if yes seek to correct position
delta = pos - self._current_buffer_start
self._buffer.seek(delta, SEEK_SET)
self._position = pos
return pos
def seekable(self):
return True
def tell(self):
return self._position
def write(self):
raise UnsupportedOperation
def writelines(self):
raise UnsupportedOperation
    def writable(self):
        return False
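# Editor's usage sketch (illustrative): _SubStream exposes a fixed window of a larger
# seekable stream. With a lock supplied, each read seeks the wrapped stream to the
# absolute window position first, so the window contents come back regardless of where
# the wrapped stream was last left.
def _example_substream_read():
    base = BytesIO(b'0123456789')
    sub = _SubStream(base, 2, 5, Lock())   # a 5-byte window starting at offset 2
    assert len(sub) == 5
    assert sub.read(5) == b'23456'
    assert sub.read(1) == b''              # window exhausted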
|
|
import speclite.filters as spec_filters
import astropy.units as astro_units
import numpy as np
import astropy.constants as constants
from threeML.utils.interval import IntervalSet
class NotASpeclikeFilter(RuntimeError):
pass
class FilterSet(object):
def __init__(self, filter, mask=None):
"""
        This class handles the optical filter functionality. It is built around speclite:
        http://speclite.readthedocs.io/en/latest/
        It accepts a speclite FilterResponse or FilterSequence, allowing for full customization
        of the filters.
        :param filter: a speclite FilterResponse or FilterSequence
:param mask: an initial mask on the filters (bool array) that remains fixed
"""
# we explicitly violate duck typing here in order to have one routine
# to return values from the filters (speclite appends 's' to the end of sequence calls)
if isinstance(filter,spec_filters.FilterResponse):
# we will make a sequence
self._filters = spec_filters.FilterSequence([filter])
elif isinstance(filter,spec_filters.FilterSequence):
self._filters = filter # type: spec_filters.FilterSequence
else:
raise NotASpeclikeFilter('filter must be a speclite FilterResponse or FilterSequence')
if mask is not None:
tmp = []
for condition, response in zip(mask, self._filters):
if condition:
tmp.append(response)
self._filters = spec_filters.FilterSequence(tmp)
self._names = np.array([name.split('-')[1] for name in self._filters.names])
self._long_name = self._filters.names
# haven't set a likelihood model yet
self._model_set = False
# calculate the FWHM
self._calculate_fwhm()
@property
def wavelength_bounds(self):
"""
IntervalSet of FWHM bounds of the filters
:return:
"""
return self._wavebounds
def _calculate_fwhm(self):
"""
calculate the FWHM of the filters
:return:
"""
wmin = []
wmax = []
# go through each filter
# and find the non-gaussian FWHM bounds
for filter in self._filters:
response = filter.response
max_response = response.max()
idx_max = response.argmax()
half_max = 0.5 * max_response
idx1 = abs(response[:idx_max] -
half_max).argmin()
idx2 = abs(response[idx_max:] -
half_max).argmin() + idx_max
# have to grab the private member here
# bc the library does not expose it!
w1 = filter._wavelength[idx1]
w2 = filter._wavelength[idx2]
wmin.append(w1)
wmax.append(w2)
self._wavebounds = IntervalSet.from_starts_and_stops(wmin,wmax)
def set_model(self, differential_flux):
"""
        Set the model that will be used during the convolution. Note that speclite
        considers a differential flux to be in units of erg/s/cm2/lambda, so we must convert
        astromodels into the proper units (using astropy units!)
"""
conversion_factor = (constants.c ** 2 * constants.h ** 2).to('keV2 * cm2')
def wrapped_model(x):
return differential_flux(x) * conversion_factor / x ** 3
self._wrapped_model = wrapped_model
self._model_set = True
def ab_magnitudes(self):
"""
return the effective stimulus of the model and filter for the given
magnitude system
:return: np.ndarray of ab magnitudes
"""
assert self._model_set, 'no likelihood model has been set'
# speclite has issues with unit conversion
# so we will do the calculation manually here
ratio = []
for filter in self._filters:
# first get the flux and convert it to base units
synthetic_flux = filter.convolve_with_function(self._wrapped_model).to('1/(cm2 s)')
# normalize it to the filter's AB magnitude
ratio.append((synthetic_flux/filter.ab_zeropoint.to('1/(cm2 s)')).value)
ratio = np.array(ratio)
return -2.5 * np.log10(ratio)
#return self._filters.get_ab_magnitudes(self._wrapped_model).to_pandas().loc[0]
def plot_filters(self):
"""
        plot the filter transmission curves
:return: fig
"""
spec_filters.plot_filters(self._filters)
@property
def n_bands(self):
"""
:return: the number of bands
"""
return len(self._filters.names)
@property
def filter_names(self):
"""
:return: the filter names
"""
return self._names
@property
def native_filter_names(self):
"""
the native filter names
:return:
"""
return self._filters.names
@property
def speclite_filters(self):
"""
        exposes the speclite filters for simulations
:return:
"""
return self._filters
@property
def effective_wavelength(self):
"""
        :return: the effective wavelengths of the filters
"""
return self._filters.effective_wavelengths
@property
def waveunits(self):
"""
        :return: the wavelength units (astropy Angstrom)
"""
return astro_units.Angstrom
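# Editor's usage sketch (hedged, illustrative): building a FilterSet from named speclite
# filters. 'sdss2010-g'/'sdss2010-r' are filter names bundled with speclite, so this
# assumes speclite's packaged filter data is available.
def _example_filterset_usage():
    sequence = spec_filters.load_filters('sdss2010-g', 'sdss2010-r')
    fs = FilterSet(sequence)
    assert fs.n_bands == 2
    assert list(fs.filter_names) == ['g', 'r']   # the part after the 'group-' prefix
    return fs.wavelength_bounds                  # FWHM bounds as an IntervalSet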
|
|
"""The tests for the Entity component helper."""
# pylint: disable=protected-access
from collections import OrderedDict
import logging
from unittest.mock import patch, Mock
from datetime import timedelta
import asynctest
import pytest
import homeassistant.core as ha
from homeassistant.exceptions import PlatformNotReady
from homeassistant.components import group
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_setup_component
from homeassistant.helpers import discovery
import homeassistant.util.dt as dt_util
from tests.common import (
MockPlatform, MockModule, mock_coro,
async_fire_time_changed, MockEntity, MockConfigEntry,
mock_entity_platform, mock_integration)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
async def test_setting_up_group(hass):
"""Set up the setting of a group."""
assert await async_setup_component(hass, 'group', {'group': {}})
component = EntityComponent(_LOGGER, DOMAIN, hass,
group_name='everyone')
# No group after setup
assert len(hass.states.async_entity_ids()) == 0
await component.async_add_entities([MockEntity()])
await hass.async_block_till_done()
# group exists
assert len(hass.states.async_entity_ids()) == 2
assert hass.states.async_entity_ids('group') == ['group.everyone']
grp = hass.states.get('group.everyone')
assert grp.attributes.get('entity_id') == \
('test_domain.unnamed_device',)
# group extended
await component.async_add_entities([MockEntity(name='goodbye')])
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 3
grp = hass.states.get('group.everyone')
# Ordered in order of added to the group
assert grp.attributes.get('entity_id') == \
('test_domain.goodbye', 'test_domain.unnamed_device')
async def test_setup_loads_platforms(hass):
"""Test the loading of the platforms."""
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
mock_integration(hass, MockModule('test_component', setup=component_setup))
# mock the dependencies
mock_integration(hass, MockModule('mod2', dependencies=['test_component']))
mock_entity_platform(hass, 'test_domain.mod2',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
assert not component_setup.called
assert not platform_setup.called
component.setup({
DOMAIN: {
'platform': 'mod2',
}
})
await hass.async_block_till_done()
assert component_setup.called
assert platform_setup.called
async def test_setup_recovers_when_setup_raises(hass):
"""Test the setup if exceptions are happening."""
platform1_setup = Mock(side_effect=Exception('Broken'))
platform2_setup = Mock(return_value=None)
mock_entity_platform(hass, 'test_domain.mod1',
MockPlatform(platform1_setup))
mock_entity_platform(hass, 'test_domain.mod2',
MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
assert not platform1_setup.called
assert not platform2_setup.called
component.setup(OrderedDict([
(DOMAIN, {'platform': 'mod1'}),
("{} 2".format(DOMAIN), {'platform': 'non_exist'}),
("{} 3".format(DOMAIN), {'platform': 'mod2'}),
]))
await hass.async_block_till_done()
assert platform1_setup.called
assert platform2_setup.called
@asynctest.patch('homeassistant.helpers.entity_component.EntityComponent'
'._async_setup_platform', return_value=mock_coro())
@asynctest.patch('homeassistant.setup.async_setup_component',
return_value=mock_coro(True))
async def test_setup_does_discovery(mock_setup_component, mock_setup, hass):
"""Test setup for discovery."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({})
discovery.load_platform(hass, DOMAIN, 'platform_test',
{'msg': 'discovery_info'}, {DOMAIN: {}})
await hass.async_block_till_done()
assert mock_setup.called
assert ('platform_test', {}, {'msg': 'discovery_info'}) == \
mock_setup.call_args[0]
@asynctest.patch('homeassistant.helpers.entity_platform.'
'async_track_time_interval')
async def test_set_scan_interval_via_config(mock_track, hass):
"""Test the setting of the scan interval via configuration."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([MockEntity(should_poll=True)])
mock_entity_platform(hass, 'test_domain.platform',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'scan_interval': timedelta(seconds=30),
}
})
await hass.async_block_till_done()
assert mock_track.called
assert timedelta(seconds=30) == mock_track.call_args[0][2]
async def test_set_entity_namespace_via_config(hass):
"""Test setting an entity namespace."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([
MockEntity(name='beer'),
MockEntity(name=None),
])
platform = MockPlatform(platform_setup)
mock_entity_platform(hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'entity_namespace': 'yummy'
}
})
await hass.async_block_till_done()
assert sorted(hass.states.async_entity_ids()) == \
['test_domain.yummy_beer', 'test_domain.yummy_unnamed_device']
async def test_extract_from_service_available_device(hass):
"""Test the extraction of entity from service and device is available."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2', available=False),
MockEntity(name='test_3'),
MockEntity(name='test_4', available=False),
])
call_1 = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(await component.async_extract_from_service(call_1)))
call_2 = ha.ServiceCall('test', 'service', data={
'entity_id': ['test_domain.test_3', 'test_domain.test_4'],
})
assert ['test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(await component.async_extract_from_service(call_2)))
async def test_platform_not_ready(hass):
"""Test that we retry when platform not ready."""
platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady,
None])
mock_integration(hass, MockModule('mod1'))
mock_entity_platform(hass, 'test_domain.mod1',
MockPlatform(platform1_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({
DOMAIN: {
'platform': 'mod1'
}
})
assert len(platform1_setup.mock_calls) == 1
assert 'test_domain.mod1' not in hass.config.components
utcnow = dt_util.utcnow()
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
# Should not trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=29))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 1
# Should trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
assert 'test_domain.mod1' not in hass.config.components
# This should not trigger attempt 3
async_fire_time_changed(hass, utcnow + timedelta(seconds=59))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
# Trigger attempt 3, which succeeds
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 3
assert 'test_domain.mod1' in hass.config.components
async def test_extract_from_service_returns_all_if_no_entity_id(hass):
"""Test the extraction of everything from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
(await component.async_extract_from_service(call)))
async def test_extract_from_service_filter_out_non_existing_entities(hass):
"""Test the extraction of non existing entities from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['test_domain.test_2', 'test_domain.non_exist']
})
assert ['test_domain.test_2'] == \
[ent.entity_id for ent
in await component.async_extract_from_service(call)]
async def test_extract_from_service_no_group_expand(hass):
"""Test not expanding a group."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
test_group = await group.Group.async_create_group(
hass, 'test_group', ['light.Ceiling', 'light.Kitchen'])
await component.async_add_entities([test_group])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['group.test_group']
})
extracted = await component.async_extract_from_service(
call, expand_group=False)
assert extracted == [test_group]
async def test_setup_dependencies_platform(hass):
"""Test we setup the dependencies of a platform.
We're explictely testing that we process dependencies even if a component
with the same name has already been loaded.
"""
mock_integration(hass, MockModule('test_component',
dependencies=['test_component2']))
mock_integration(hass, MockModule('test_component2'))
mock_entity_platform(hass, 'test_domain.test_component', MockPlatform())
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({
DOMAIN: {
'platform': 'test_component',
}
})
assert 'test_component' in hass.config.components
assert 'test_component2' in hass.config.components
assert 'test_domain.test_component' in hass.config.components
async def test_setup_entry(hass):
"""Test setup entry calls async_setup_entry on platform."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry,
scan_interval=timedelta(seconds=5)))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
p_hass, p_entry, _ = mock_setup_entry.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert component._platforms[entry.entry_id].scan_interval == \
timedelta(seconds=5)
async def test_setup_entry_platform_not_exist(hass):
"""Test setup entry fails if platform doesnt exist."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='non_existing')
assert (await component.async_setup_entry(entry)) is False
async def test_setup_entry_fails_duplicate(hass):
"""Test we don't allow setting up a config entry twice."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
with pytest.raises(ValueError):
await component.async_setup_entry(entry)
async def test_unload_entry_resets_platform(hass):
"""Test unloading an entry removes all entities."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
add_entities = mock_setup_entry.mock_calls[0][1][2]
add_entities([MockEntity()])
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
assert await component.async_unload_entry(entry)
assert len(hass.states.async_entity_ids()) == 0
async def test_unload_entry_fails_if_never_loaded(hass):
"""."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
with pytest.raises(ValueError):
await component.async_unload_entry(entry)
async def test_update_entity(hass):
"""Test that we can update an entity with the helper."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity = MockEntity()
entity.async_update_ha_state = Mock(return_value=mock_coro())
await component.async_add_entities([entity])
# Called as part of async_add_entities
assert len(entity.async_update_ha_state.mock_calls) == 1
await hass.helpers.entity_component.async_update_entity(entity.entity_id)
assert len(entity.async_update_ha_state.mock_calls) == 2
assert entity.async_update_ha_state.mock_calls[-1][1][0] is True
async def test_set_service_race(hass):
"""Test race condition on setting service."""
exception = False
def async_loop_exception_handler(_, _2) -> None:
"""Handle all exception inside the core loop."""
nonlocal exception
exception = True
hass.loop.set_exception_handler(async_loop_exception_handler)
await async_setup_component(hass, 'group', {})
component = EntityComponent(_LOGGER, DOMAIN, hass, group_name='yo')
for _ in range(2):
hass.async_create_task(component.async_add_entities([MockEntity()]))
await hass.async_block_till_done()
assert not exception
async def test_extract_all_omit_entity_id(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') in caplog.text
async def test_extract_all_use_match_all(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {'entity_id': 'all'})
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') not in caplog.text
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for training models.
See the [Training](https://tensorflow.org/api_guides/python/train) guide.
"""
# Optimizers.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order,unused-import
from tensorflow.python.ops.sdca_ops import sdca_optimizer
from tensorflow.python.ops.sdca_ops import sdca_fprint
from tensorflow.python.ops.sdca_ops import sdca_shrink_l1
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adagrad import AdagradOptimizer
from tensorflow.python.training.adagrad_da import AdagradDAOptimizer
from tensorflow.python.training.proximal_adagrad import ProximalAdagradOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.ftrl import FtrlOptimizer
from tensorflow.python.training.experimental.loss_scale_optimizer import MixedPrecisionLossScaleOptimizer
from tensorflow.python.training.experimental.mixed_precision import enable_mixed_precision_graph_rewrite_v1
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.moving_averages import ExponentialMovingAverage
from tensorflow.python.training.optimizer import Optimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.proximal_gradient_descent import ProximalGradientDescentOptimizer
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizer
# Utility classes for training.
from tensorflow.python.training.coordinator import Coordinator
from tensorflow.python.training.coordinator import LooperThread
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.queue_runner import *
# For the module level doc.
from tensorflow.python.training import input as _input
from tensorflow.python.training.input import * # pylint: disable=redefined-builtin
# pylint: enable=wildcard-import
from tensorflow.python.training.basic_session_run_hooks import get_or_create_steps_per_run_variable
from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
from tensorflow.python.training.basic_session_run_hooks import LoggingTensorHook
from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverHook
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverListener
from tensorflow.python.training.basic_session_run_hooks import StepCounterHook
from tensorflow.python.training.basic_session_run_hooks import NanLossDuringTrainingError
from tensorflow.python.training.basic_session_run_hooks import NanTensorHook
from tensorflow.python.training.basic_session_run_hooks import SummarySaverHook
from tensorflow.python.training.basic_session_run_hooks import GlobalStepWaiterHook
from tensorflow.python.training.basic_session_run_hooks import FinalOpsHook
from tensorflow.python.training.basic_session_run_hooks import FeedFnHook
from tensorflow.python.training.basic_session_run_hooks import ProfilerHook
from tensorflow.python.training.basic_loops import basic_train_loop
from tensorflow.python.training.tracking.python_state import PythonState
from tensorflow.python.training.tracking.util import Checkpoint
from tensorflow.python.training.checkpoint_utils import init_from_checkpoint
from tensorflow.python.training.checkpoint_utils import list_variables
from tensorflow.python.training.checkpoint_utils import load_checkpoint
from tensorflow.python.training.checkpoint_utils import load_variable
from tensorflow.python.training.device_setter import replica_device_setter
from tensorflow.python.training.monitored_session import Scaffold
from tensorflow.python.training.monitored_session import MonitoredTrainingSession
from tensorflow.python.training.monitored_session import SessionCreator
from tensorflow.python.training.monitored_session import ChiefSessionCreator
from tensorflow.python.training.monitored_session import WorkerSessionCreator
from tensorflow.python.training.monitored_session import MonitoredSession
from tensorflow.python.training.monitored_session import SingularMonitoredSession
from tensorflow.python.training.saver import Saver
from tensorflow.python.training.checkpoint_management import checkpoint_exists
from tensorflow.python.training.checkpoint_management import generate_checkpoint_state_proto
from tensorflow.python.training.checkpoint_management import get_checkpoint_mtimes
from tensorflow.python.training.checkpoint_management import get_checkpoint_state
from tensorflow.python.training.checkpoint_management import latest_checkpoint
from tensorflow.python.training.checkpoint_management import update_checkpoint_state
from tensorflow.python.training.saver import export_meta_graph
from tensorflow.python.training.saver import import_meta_graph
from tensorflow.python.training.session_run_hook import SessionRunHook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.session_run_hook import SessionRunContext
from tensorflow.python.training.session_run_hook import SessionRunValues
from tensorflow.python.training.session_manager import SessionManager
from tensorflow.python.training.summary_io import summary_iterator
from tensorflow.python.training.supervisor import Supervisor
from tensorflow.python.training.training_util import write_graph
from tensorflow.python.training.training_util import global_step
from tensorflow.python.training.training_util import get_global_step
from tensorflow.python.training.training_util import assert_global_step
from tensorflow.python.training.training_util import create_global_step
from tensorflow.python.training.training_util import get_or_create_global_step
from tensorflow.python.training.warm_starting_util import VocabInfo
from tensorflow.python.training.warm_starting_util import warm_start
from tensorflow.python.training.py_checkpoint_reader import NewCheckpointReader
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=wildcard-import
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *
# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import *
# pylint: enable=wildcard-import
# Distributed computing support.
from tensorflow.core.protobuf.cluster_pb2 import ClusterDef
from tensorflow.core.protobuf.cluster_pb2 import JobDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.training.server_lib import Server
# pylint: disable=undefined-variable
tf_export("train.BytesList")(BytesList)
tf_export("train.ClusterDef")(ClusterDef)
tf_export("train.Example")(Example)
tf_export("train.Feature")(Feature)
tf_export("train.Features")(Features)
tf_export("train.FeatureList")(FeatureList)
tf_export("train.FeatureLists")(FeatureLists)
tf_export("train.FloatList")(FloatList)
tf_export("train.Int64List")(Int64List)
tf_export("train.JobDef")(JobDef)
tf_export(v1=["train.SaverDef"])(SaverDef)
tf_export("train.SequenceExample")(SequenceExample)
tf_export("train.ServerDef")(ServerDef)
# Docstring definitions for protos.
# LINT.IfChange
BytesList.__doc__ = """\
Container that holds repeated fundamental values of byte type in the `tf.train.Feature` message.
See the [`tf.train.Example`](https://www.tensorflow.org/tutorials/load_data/tfrecord#tftrainexample)
guide for usage details.
"""
FloatList.__doc__ = """\
Container that holds repeated fundamental values of float type in the `tf.train.Feature` message.
See the [`tf.train.Example`](https://www.tensorflow.org/tutorials/load_data/tfrecord#tftrainexample)
guide for usage details.
"""
Int64List.__doc__ = """\
Container that holds repeated fundamental values of int64 type in the `tf.train.Feature` message.
See the [`tf.train.Example`](https://www.tensorflow.org/tutorials/load_data/tfrecord#tftrainexample)
guide for usage details.
"""
Feature.__doc__ = """\
A `Feature` is a list which may hold zero or more values.
There are three base `Feature` types:
- `tf.train.BytesList`
- `tf.train.FloatList`
- `tf.train.Int64List`
"""
Features.__doc__ = """\
Protocol message for describing the `features` of a `tf.train.Example`.
`Features` are organized into categories by name. The `Features` message
contains the mapping from name to `tf.train.Feature`.
An example `Features` message for a movie recommendation application:
```
feature {
key: "age"
value { float_list {
value: 29.0
}}
}
feature {
key: "movie"
value { bytes_list {
value: "The Shawshank Redemption"
value: "Fight Club"
}}
}
feature {
key: "movie_ratings"
value { float_list {
value: 9.0
value: 9.7
}}
}
feature {
key: "suggestion"
value { bytes_list {
value: "Inception"
}}
}
feature {
key: "suggestion_purchased"
value { int64_list {
value: 1
}}
}
feature {
key: "purchase_price"
value { float_list {
value: 9.99
}}
}
```
"""
FeatureList.__doc__ = "Contains zero or more values of `tf.train.Feature`s."
FeatureLists.__doc__ = ("Contains the mapping from name to "
"`tf.train.FeatureList`.")
# LINT.ThenChange(
# https://www.tensorflow.org/code/tensorflow/core/example/feature.proto)
# LINT.IfChange
Example.__doc__ = """\
An `Example` is a mostly-normalized data format for storing data for training and inference.
It contains a key-value store `features` where each key (string) maps to a
`tf.train.Feature` message. This flexible and compact format allows the
storage of large amounts of typed data, but requires that the data shape
and use be determined by the configuration files and parsers that are used to
read and write this format.
In TensorFlow, `Example`s are read in row-major
format, so any configuration that describes data with rank-2 or above
should keep this in mind. For example, to store an `M x N` matrix of bytes,
the `tf.train.BytesList` must contain M*N bytes, with `M` rows of `N` contiguous values
each. That is, the `BytesList` value must store the matrix as:
```.... row 0 .... // .... row 1 .... // ........... // ... row M-1 ....```
An `Example` for a movie recommendation application:
```
features {
feature {
key: "age"
value { float_list {
value: 29.0
}}
}
feature {
key: "movie"
value { bytes_list {
value: "The Shawshank Redemption"
value: "Fight Club"
}}
}
feature {
key: "movie_ratings"
value { float_list {
value: 9.0
value: 9.7
}}
}
feature {
key: "suggestion"
value { bytes_list {
value: "Inception"
}}
}
# Note that this feature exists to be used as a label in training.
# E.g., if training a logistic regression model to predict purchase
# probability in our learning tool we would set the label feature to
# "suggestion_purchased".
feature {
key: "suggestion_purchased"
value { float_list {
value: 1.0
}}
}
# Similar to "suggestion_purchased" above this feature exists to be used
# as a label in training.
# E.g., if training a linear regression model to predict purchase
# price in our learning tool we would set the label feature to
# "purchase_price".
feature {
key: "purchase_price"
value { float_list {
value: 9.99
}}
}
}
```
A conformant `Example` dataset obeys the following conventions:
- If a Feature `K` exists in one example with data type `T`, it must be of
type `T` in all other examples when present. It may be omitted.
- The number of instances of Feature `K` list data may vary across examples,
depending on the requirements of the model.
- If a Feature `K` doesn't exist in an example, a `K`-specific default will be
used, if configured.
- If a Feature `K` exists in an example but contains no items, the intent
is considered to be an empty tensor and no default will be used.
"""
SequenceExample.__doc__ = """\
A `SequenceExample` is a format for representing one or more sequences and some context.
The `context` contains features which apply to the entire
example. The `feature_lists` contain a key, value map where each key is
associated with a repeated set of `tf.train.Features` (a `tf.train.FeatureList`).
A `FeatureList` represents the values of a feature identified by its key
over time / frames.
Below is a `SequenceExample` for a movie recommendation application recording a
sequence of ratings by a user. The time-independent features ("locale",
"age", "favorites") describing the user are part of the context. The sequence
of movies the user rated are part of the feature_lists. For each movie in the
sequence we have information on its name and actors and the user's rating.
This information is recorded in three separate `feature_list`s.
In the example below there are only two movies. All three `feature_list`s,
namely "movie_ratings", "movie_names", and "actors" have a feature value for
both movies. Note that "actors" is itself a `bytes_list` with multiple
strings per movie.
```
context: {
feature: {
key : "locale"
value: {
bytes_list: {
value: [ "pt_BR" ]
}
}
}
feature: {
key : "age"
value: {
float_list: {
value: [ 19.0 ]
}
}
}
feature: {
key : "favorites"
value: {
bytes_list: {
value: [ "Majesty Rose", "Savannah Outen", "One Direction" ]
}
}
}
}
feature_lists: {
feature_list: {
key : "movie_ratings"
value: {
feature: {
float_list: {
value: [ 4.5 ]
}
}
feature: {
float_list: {
value: [ 5.0 ]
}
}
}
}
feature_list: {
key : "movie_names"
value: {
feature: {
bytes_list: {
value: [ "The Shawshank Redemption" ]
}
}
feature: {
bytes_list: {
value: [ "Fight Club" ]
}
}
}
}
feature_list: {
key : "actors"
value: {
feature: {
bytes_list: {
value: [ "Tim Robbins", "Morgan Freeman" ]
}
}
feature: {
bytes_list: {
value: [ "Brad Pitt", "Edward Norton", "Helena Bonham Carter" ]
}
}
}
}
}
```
A conformant `SequenceExample` data set obeys the following conventions:
`context`:
- All conformant context features `K` must obey the same conventions as
a conformant Example's features (see above).
`feature_lists`:
- A `FeatureList L` may be missing in an example; it is up to the
parser configuration to determine if this is allowed or considered
an empty list (zero length).
- If a `FeatureList L` exists, it may be empty (zero length).
- If a `FeatureList L` is non-empty, all features within the `FeatureList`
must have the same data type `T`. Even across `SequenceExample`s, the type `T`
of the `FeatureList` identified by the same key must be the same. An entry
without any values may serve as an empty feature.
- If a `FeatureList L` is non-empty, it is up to the parser configuration
to determine if all features within the `FeatureList` must
have the same size. The same holds for this `FeatureList` across multiple
examples.
- For sequence modeling ([example](https://github.com/tensorflow/nmt)), the
feature lists represent a sequence of frames. In this scenario, all
`FeatureList`s in a `SequenceExample` have the same number of `Feature`
messages, so that the i-th element in each `FeatureList` is part of the
i-th frame (or time step).
**Examples of conformant and non-conformant examples' `FeatureLists`:**
Conformant `FeatureLists`:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
```
Non-conformant `FeatureLists` (mismatched types):
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { int64_list: { value: [ 5 ] } } }
} }
```
Conditionally conformant `FeatureLists`, the parser configuration determines
if the feature sizes must match:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0, 6.0 ] } } }
} }
```
**Examples of conformant and non-conformant `SequenceExample`s:**
Conformant pair of `SequenceExample`s:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } }
feature: { float_list: { value: [ 2.0 ] } } }
} }
```
Conformant pair of `SequenceExample`s:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { feature_list: {
key: "movie_ratings"
value: { }
} }
```
Conditionally conformant pair of `SequenceExample`s, the parser configuration
determines if the second `feature_lists` is consistent (zero-length) or
invalid (missing "movie_ratings"):
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { }
```
Non-conformant pair of `SequenceExample`s (mismatched types):
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { int64_list: { value: [ 4 ] } }
feature: { int64_list: { value: [ 5 ] } }
feature: { int64_list: { value: [ 2 ] } } }
} }
```
Conditionally conformant pair of `SequenceExample`s; the parser configuration
determines if the feature sizes must match:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.0 ] } }
feature: { float_list: { value: [ 5.0, 3.0 ] } } }
} }
```
"""
# pylint: enable=undefined-variable
# LINT.ThenChange(
# https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
|
|
"""
Utility functions for working with simulated targets
"""
import os
import sys
import numpy as np
import yaml
from desimodel.focalplane import FocalPlane
import desimodel.io
from desispec import brick
from desispec.io.fibermap import empty_fibermap
from desisim import io
def sample_objtype(nobj):
"""
Return a random sampling of object types (SKY, STD, ELG, LRG, QSO, QSO_BAD)
Args:
nobj : number of objects to generate
Returns:
(true_objtype, target_objtype)
where
true_objtype : array of what type the objects actually are
target_objtype : array of type they were targeted as
Notes:
- Actual fiber assignment will result in higher relative fractions of
LRGs and QSOs in early passes and more ELGs in later passes.
- Also ensures at least 2 sky and 1 stdstar whenever nobj >= 3
"""
#- Load target densities
#- TODO: what about nobs_boss (BOSS-like LRGs)?
fx = open(os.getenv('DESIMODEL')+'/data/targets/targets.dat')
tgt = yaml.load(fx)
fx.close()
ntgt = float(tgt['nobs_lrg'] + tgt['nobs_elg'] + \
tgt['nobs_qso'] + tgt['nobs_lya'] + tgt['ntarget_badqso'])
#- Fraction of sky and standard star targets is guaranteed
nsky = int(tgt['frac_sky'] * nobj)
nstd = int(tgt['frac_std'] * nobj)
#- Assure at least 2 sky and 1 std
if nobj >= 3:
if nstd < 1:
nstd = 1
if nsky < 2:
nsky = 2
#- Number of science fibers available
nsci = nobj - (nsky+nstd)
#- LRGs ELGs QSOs
nlrg = np.random.poisson(nsci * tgt['nobs_lrg'] / ntgt)
nqso = np.random.poisson(nsci * (tgt['nobs_qso'] + tgt['nobs_lya']) / ntgt)
nqso_bad = np.random.poisson(nsci * (tgt['ntarget_badqso']) / ntgt)
nelg = nobj - (nlrg+nqso+nqso_bad+nsky+nstd)
true_objtype = ['SKY']*nsky + ['STD']*nstd
true_objtype += ['ELG']*nelg
true_objtype += ['LRG']*nlrg
true_objtype += ['QSO']*nqso + ['QSO_BAD']*nqso_bad
assert(len(true_objtype) == nobj)
np.random.shuffle(true_objtype)
target_objtype = list()
for x in true_objtype:
if x == 'QSO_BAD':
target_objtype.append('QSO')
else:
target_objtype.append(x)
target_objtype = np.array(target_objtype)
true_objtype = np.array(true_objtype)
return true_objtype, target_objtype
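# A minimal usage sketch (requires $DESIMODEL to provide data/targets/targets.dat);
# the exact split varies between runs because the science counts are drawn from
# Poisson distributions:
#
#   true_types, target_types = sample_objtype(500)
#   # every 'QSO_BAD' entry in true_types appears as 'QSO' in target_types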
def get_targets(nspec, tileid=None):
"""
Returns:
fibermap
truth table
TODO: document this better
"""
if tileid is None:
tile_ra, tile_dec = 0.0, 0.0
else:
tile_ra, tile_dec = io.get_tile_radec(tileid)
#- Get distribution of target types
true_objtype, target_objtype = sample_objtype(nspec)
#- Get DESI wavelength coverage
wavemin = desimodel.io.load_throughput('b').wavemin
wavemax = desimodel.io.load_throughput('z').wavemax
dw = 0.2
wave = np.arange(round(wavemin, 1), wavemax, dw)
nwave = len(wave)
truth = dict()
truth['FLUX'] = np.zeros( (nspec, len(wave)) )
truth['REDSHIFT'] = np.zeros(nspec, dtype='f4')
truth['TEMPLATEID'] = np.zeros(nspec, dtype='i4')
truth['O2FLUX'] = np.zeros(nspec, dtype='f4')
truth['OBJTYPE'] = np.zeros(nspec, dtype='S10')
#- Note: unlike other elements, first index of WAVE isn't spectrum index
truth['WAVE'] = wave
fibermap = empty_fibermap(nspec)
for objtype in set(true_objtype):
ii = np.where(true_objtype == objtype)[0]
fibermap['OBJTYPE'][ii] = target_objtype[ii]
truth['OBJTYPE'][ii] = true_objtype[ii]
if objtype == 'SKY':
continue
try:
simflux, meta = io.read_templates(wave, objtype, len(ii))
except ValueError, err:
print err
continue
truth['FLUX'][ii] = simflux
#- STD don't have redshift Z; others do
#- In principle we should also have redshifts (radial velocities)
#- for standards as well.
if 'Z' in meta.dtype.names:
truth['REDSHIFT'][ii] = meta['Z']
elif objtype != 'STD':
print "No redshifts for", objtype, len(ii)
#- Only ELGs have [OII] flux
if objtype == 'ELG':
truth['O2FLUX'][ii] = meta['OII_3727']
#- Everyone had a templateid
truth['TEMPLATEID'][ii] = meta['TEMPLATEID']
#- Extract magnitudes from colors
#- TODO: make this more consistent at the input level
#- Standard Stars have SDSS magnitudes
if objtype == 'STD':
magr = meta['SDSS_R']
magi = magr - meta['SDSS_RI']
magz = magi - meta['SDSS_IZ']
magg = magr - meta['SDSS_GR'] #- R-G, not G-R ?
magu = magg - meta['SDSS_UG']
mags = np.vstack( [magu, magg, magr, magi, magz] ).T
filters = ['SDSS_U', 'SDSS_G', 'SDSS_R', 'SDSS_I', 'SDSS_Z']
fibermap['MAG'][ii] = mags
fibermap['FILTER'][ii] = filters
#- LRGs
elif objtype == 'LRG':
magz = meta['DECAM_Z']
magr = magz - meta['DECAM_RZ']
magw = magr - meta['DECAM_RW1']
fibermap['MAG'][ii, 0:3] = np.vstack( [magr, magz, magw] ).T
fibermap['FILTER'][ii, 0:3] = ['DECAM_R', 'DECAM_Z', 'WISE_W1']
#- ELGs
elif objtype == 'ELG':
magr = meta['DECAM_R']
magg = magr - meta['DECAM_GR']
magz = magr - meta['DECAM_RZ']
fibermap['MAG'][ii, 0:3] = np.vstack( [magg, magr, magz] ).T
fibermap['FILTER'][ii, 0:3] = ['DECAM_G', 'DECAM_R', 'DECAM_Z']
elif objtype == 'QSO':
#- QSO templates don't have magnitudes yet
pass
#- Load fiber -> positioner mapping and tile information
fiberpos = desimodel.io.load_fiberpos()
#- Where are these targets? Centered on positioners for now.
x = fiberpos['X'][0:nspec]
y = fiberpos['Y'][0:nspec]
fp = FocalPlane(tile_ra, tile_dec)
ra = np.zeros(nspec)
dec = np.zeros(nspec)
for i in range(nspec):
ra[i], dec[i] = fp.xy2radec(x[i], y[i])
#- Fill in the rest of the fibermap structure
fibermap['FIBER'] = np.arange(nspec, dtype='i4')
fibermap['POSITIONER'] = fiberpos['POSITIONER'][0:nspec]
fibermap['SPECTROID'] = fiberpos['SPECTROGRAPH'][0:nspec]
fibermap['TARGETID'] = np.random.randint(sys.maxint, size=nspec)
fibermap['TARGETCAT'] = np.zeros(nspec, dtype='|S20')
fibermap['LAMBDAREF'] = np.ones(nspec, dtype=np.float32)*5400
fibermap['TARGET_MASK0'] = np.zeros(nspec, dtype='i8')
fibermap['RA_TARGET'] = ra
fibermap['DEC_TARGET'] = dec
fibermap['X_TARGET'] = x
fibermap['Y_TARGET'] = y
fibermap['X_FVCOBS'] = fibermap['X_TARGET']
fibermap['Y_FVCOBS'] = fibermap['Y_TARGET']
fibermap['X_FVCERR'] = np.zeros(nspec, dtype=np.float32)
fibermap['Y_FVCERR'] = np.zeros(nspec, dtype=np.float32)
fibermap['RA_OBS'] = fibermap['RA_TARGET']
fibermap['DEC_OBS'] = fibermap['DEC_TARGET']
fibermap['BRICKNAME'] = brick.brickname(ra, dec)
return fibermap, truth
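# A minimal usage sketch (requires the desimodel data files to be installed);
# the truth dictionary shares a single wavelength grid across all spectra:
#
#   fibermap, truth = get_targets(nspec=500)
#   assert truth['FLUX'].shape == (500, len(truth['WAVE']))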
#-------------------------------------------------------------------------
#- Currently unused, but keep around for now
def sample_nz(objtype, n):
"""
Given `objtype` = 'LRG', 'ELG', 'QSO', 'STAR', 'STD'
return array of `n` redshifts that properly sample n(z)
from $DESIMODEL/data/targets/nz*.dat
"""
#- TODO: should this be in desimodel instead?
#- Stars are at redshift 0 for now. Could consider a velocity dispersion.
if objtype in ('STAR', 'STD'):
return np.zeros(n, dtype=float)
#- Determine which input n(z) file to use
targetdir = os.getenv('DESIMODEL')+'/data/targets/'
objtype = objtype.upper()
if objtype == 'LRG':
infile = targetdir+'/nz_lrg.dat'
elif objtype == 'ELG':
infile = targetdir+'/nz_elg.dat'
elif objtype == 'QSO':
#- TODO: should use full dNdzdg distribution instead
infile = targetdir+'/nz_qso.dat'
else:
raise ValueError("objtype {} not recognized (ELG LRG QSO STD STAR)".format(objtype))
#- Read n(z) distribution
zlo, zhi, ntarget = np.loadtxt(infile, unpack=True)[0:3]
#- Construct normalized cumulative density function (cdf)
cdf = np.cumsum(ntarget, dtype=float)
cdf /= cdf[-1]
#- Sample that distribution
x = np.random.uniform(0.0, 1.0, size=n)
return np.interp(x, cdf, zhi)
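# A minimal usage sketch (requires $DESIMODEL to provide data/targets/nz_elg.dat);
# pushing uniform draws through the interpolated cumulative distribution is a
# standard inverse-transform sampling of n(z):
#
#   z = sample_nz('ELG', 1000)
#   assert len(z) == 1000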
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Chromaprint/Acoustid acoustic fingerprinting support to the
autotagger. Requires the pyacoustid library.
"""
from __future__ import division, absolute_import, print_function
from beets import plugins
from beets import ui
from beets import util
from beets import config
from beets.util import confit
from beets.autotag import hooks
import acoustid
from collections import defaultdict
API_KEY = '1vOwZtEn'
SCORE_THRESH = 0.5
TRACK_ID_WEIGHT = 10.0
COMMON_REL_THRESH = 0.6  # Fraction of a task's tracks that must share a release.
MAX_RECORDINGS = 5
MAX_RELEASES = 5
# Stores the Acoustid match information for each track. This is
# populated when an import task begins and then used when searching for
# candidates. It maps audio file paths to (recording_ids, release_ids)
# pairs. If a given path is not present in the mapping, then no match
# was found.
_matches = {}
# Stores the fingerprint and Acoustid ID for each track. This is stored
# as metadata for each track for later use but is not relevant for
# autotagging.
_fingerprints = {}
_acoustids = {}
def prefix(it, count):
"""Truncate an iterable to at most `count` items.
"""
for i, v in enumerate(it):
if i >= count:
break
yield v
def acoustid_match(log, path):
"""Gets metadata for a file from Acoustid and populates the
_matches, _fingerprints, and _acoustids dictionaries accordingly.
"""
try:
duration, fp = acoustid.fingerprint_file(util.syspath(path))
except acoustid.FingerprintGenerationError as exc:
log.error(u'fingerprinting of {0} failed: {1}',
util.displayable_path(repr(path)), exc)
return None
_fingerprints[path] = fp
try:
res = acoustid.lookup(API_KEY, fp, duration,
meta='recordings releases')
except acoustid.AcoustidError as exc:
log.debug(u'fingerprint matching {0} failed: {1}',
util.displayable_path(repr(path)), exc)
return None
log.debug(u'chroma: fingerprinted {0}',
util.displayable_path(repr(path)))
# Ensure the response is usable and parse it.
if res['status'] != 'ok' or not res.get('results'):
log.debug(u'no match found')
return None
result = res['results'][0] # Best match.
if result['score'] < SCORE_THRESH:
log.debug(u'no results above threshold')
return None
_acoustids[path] = result['id']
# Get recording and releases from the result.
if not result.get('recordings'):
log.debug(u'no recordings found')
return None
recording_ids = []
release_ids = []
for recording in result['recordings']:
recording_ids.append(recording['id'])
if 'releases' in recording:
release_ids += [rel['id'] for rel in recording['releases']]
log.debug(u'matched recordings {0} on releases {1}',
recording_ids, release_ids)
_matches[path] = recording_ids, release_ids
# Plugin structure and autotagging logic.
def _all_releases(items):
"""Given an iterable of Items, determines (according to Acoustid)
which releases the items have in common. Generates release IDs.
"""
# Count the number of "hits" for each release.
relcounts = defaultdict(int)
for item in items:
if item.path not in _matches:
continue
_, release_ids = _matches[item.path]
for release_id in release_ids:
relcounts[release_id] += 1
for release_id, count in relcounts.items():
if float(count) / len(items) > COMMON_REL_THRESH:
yield release_id
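# Worked example of the threshold above (illustrative numbers): with 10 items
# in the task and COMMON_REL_THRESH = 0.6, a release ID is yielded only when at
# least 7 of the items' Acoustid matches reference that release.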
class AcoustidPlugin(plugins.BeetsPlugin):
def __init__(self):
super(AcoustidPlugin, self).__init__()
self.config.add({
'auto': True,
})
config['acoustid']['apikey'].redact = True
if self.config['auto']:
self.register_listener('import_task_start', self.fingerprint_task)
self.register_listener('import_task_apply', apply_acoustid_metadata)
def fingerprint_task(self, task, session):
return fingerprint_task(self._log, task, session)
def track_distance(self, item, info):
dist = hooks.Distance()
if item.path not in _matches or not info.track_id:
# Match failed or no track ID.
return dist
recording_ids, _ = _matches[item.path]
dist.add_expr('track_id', info.track_id not in recording_ids)
return dist
def candidates(self, items, artist, album, va_likely):
albums = []
for relid in prefix(_all_releases(items), MAX_RELEASES):
album = hooks.album_for_mbid(relid)
if album:
albums.append(album)
self._log.debug(u'acoustid album candidates: {0}', len(albums))
return albums
def item_candidates(self, item, artist, title):
if item.path not in _matches:
return []
recording_ids, _ = _matches[item.path]
tracks = []
for recording_id in prefix(recording_ids, MAX_RECORDINGS):
track = hooks.track_for_mbid(recording_id)
if track:
tracks.append(track)
self._log.debug(u'acoustid item candidates: {0}', len(tracks))
return tracks
def commands(self):
submit_cmd = ui.Subcommand('submit',
help=u'submit Acoustid fingerprints')
def submit_cmd_func(lib, opts, args):
try:
apikey = config['acoustid']['apikey'].as_str()
except confit.NotFoundError:
raise ui.UserError(u'no Acoustid user API key provided')
submit_items(self._log, apikey, lib.items(ui.decargs(args)))
submit_cmd.func = submit_cmd_func
fingerprint_cmd = ui.Subcommand(
'fingerprint',
help=u'generate fingerprints for items without them'
)
def fingerprint_cmd_func(lib, opts, args):
for item in lib.items(ui.decargs(args)):
fingerprint_item(self._log, item, write=ui.should_write())
fingerprint_cmd.func = fingerprint_cmd_func
return [submit_cmd, fingerprint_cmd]
# Hooks into import process.
def fingerprint_task(log, task, session):
"""Fingerprint each item in the task for later use during the
autotagging candidate search.
"""
items = task.items if task.is_album else [task.item]
for item in items:
acoustid_match(log, item.path)
def apply_acoustid_metadata(task, session):
"""Apply Acoustid metadata (fingerprint and ID) to the task's items.
"""
for item in task.imported_items():
if item.path in _fingerprints:
item.acoustid_fingerprint = _fingerprints[item.path]
if item.path in _acoustids:
item.acoustid_id = _acoustids[item.path]
# UI commands.
def submit_items(log, userkey, items, chunksize=64):
"""Submit fingerprints for the items to the Acoustid server.
"""
data = [] # The running list of dictionaries to submit.
def submit_chunk():
"""Submit the current accumulated fingerprint data."""
log.info(u'submitting {0} fingerprints', len(data))
try:
acoustid.submit(API_KEY, userkey, data)
except acoustid.AcoustidError as exc:
log.warn(u'acoustid submission error: {0}', exc)
del data[:]
for item in items:
fp = fingerprint_item(log, item)
# Construct a submission dictionary for this item.
item_data = {
'duration': int(item.length),
'fingerprint': fp,
}
if item.mb_trackid:
item_data['mbid'] = item.mb_trackid
log.debug(u'submitting MBID')
else:
item_data.update({
'track': item.title,
'artist': item.artist,
'album': item.album,
'albumartist': item.albumartist,
'year': item.year,
'trackno': item.track,
'discno': item.disc,
})
log.debug(u'submitting textual metadata')
data.append(item_data)
# If we have enough data, submit a chunk.
if len(data) >= chunksize:
submit_chunk()
# Submit remaining data in a final chunk.
if data:
submit_chunk()
def fingerprint_item(log, item, write=False):
"""Get the fingerprint for an Item. If the item already has a
fingerprint, it is not regenerated. If fingerprint generation fails,
return None. If the items are associated with a library, they are
saved to the database. If `write` is set, then the new fingerprints
are also written to files' metadata.
"""
# Get a fingerprint and length for this track.
if not item.length:
log.info(u'{0}: no duration available',
util.displayable_path(item.path))
elif item.acoustid_fingerprint:
if write:
log.info(u'{0}: fingerprint exists, skipping',
util.displayable_path(item.path))
else:
log.info(u'{0}: using existing fingerprint',
util.displayable_path(item.path))
return item.acoustid_fingerprint
else:
log.info(u'{0}: fingerprinting',
util.displayable_path(item.path))
try:
_, fp = acoustid.fingerprint_file(item.path)
item.acoustid_fingerprint = fp
if write:
log.info(u'{0}: writing fingerprint',
util.displayable_path(item.path))
item.try_write()
if item._db:
item.store()
return item.acoustid_fingerprint
except acoustid.FingerprintGenerationError as exc:
log.info(u'fingerprint generation failed: {0}', exc)
|
|
# -*- coding: utf-8 -*-
from PyQt4 import QtGui, QtCore
import sys
from Playlist import Playlist
import vlc
import urllib
__author__ = 'postrowski'
class SystemTray(QtGui.QSystemTrayIcon):
"""
Class SystemTray, which shows the app indicator and supports its actions.
"""
def __init__(self, parent):
super(SystemTray, self).__init__(parent)
self.sc = QtGui.QFileDialog()
self.nowPlayingLabel = parent.nowPlayingLabel
self.programLabel = parent.programLabel
self.logoLabel = parent.logoLabel
self.coverWebView = parent.coverWebView
self.programWebView = parent.programWebView
self.tray_menu = parent.tray_menu
self.tray_icon = parent.tray_icon
self.hide_ui = parent.hide_ui
self.show_ui = parent.show_ui
self.central_widget = parent.central_widget
self.timer_show = parent.timer_show
self.setup_menu()
self.playlist = Playlist(self)
self.instance = vlc.Instance() # create a vlc instance
self.player = self.instance.media_player_new() # create an empty vlc media player
stream = 'http://shoutcast.rtl.it:3010/stream/1/'
option = '--extraintf=http' # enable web interface
self.media = self.instance.media_new(stream, option) # create the media
self.player.set_media(self.media)
self.info_0 = None # last metadata seen; stays None until set_meta_data runs
self.timer_check = QtCore.QTimer()
self.connect(self.timer_check, QtCore.SIGNAL("timeout()"), self.set_meta_data) # polling every second
self.my_dict = {}
def setup_menu(self):
"""
Setup app indicator menu.
:return: None
"""
# menu
self.show_action = QtGui.QAction("Show", self.tray_menu)
self.connect(self.show_action, QtCore.SIGNAL("triggered()"), self.show_all)
self.tray_menu.addAction(self.show_action)
self.play_pause_action = QtGui.QAction("Play", self.tray_menu)
self.connect(self.play_pause_action, QtCore.SIGNAL("triggered()"), self.play_pause)
self.tray_menu.addAction(self.play_pause_action)
self.stop_action = QtGui.QAction("Stop", self.tray_menu)
self.connect(self.stop_action, QtCore.SIGNAL("triggered()"), self.stop)
self.tray_menu.addAction(self.stop_action)
self.stop_action.setVisible(False)
self.save_cover_action = QtGui.QAction("Save album cover", self.tray_menu)
self.connect(self.save_cover_action, QtCore.SIGNAL("triggered()"),
lambda: self.save_picture(self.my_dict["album_cover"],
self.my_dict[u"artist_name"] + " - " + self.my_dict[u"album_title"]))
self.tray_menu.addAction(self.save_cover_action)
self.save_cover_action.setVisible(False)
self.save_image_action = QtGui.QAction("Save program image", self.tray_menu)
self.connect(self.save_image_action, QtCore.SIGNAL("triggered()"),
lambda: self.save_picture(self.my_dict["program_image"],
self.my_dict[u"program_title"] + " - " + self.my_dict[u"speakers"]))
self.tray_menu.addAction(self.save_image_action)
self.save_image_action.setVisible(False)
quit_action = QtGui.QAction("Quit", self.tray_menu)
self.connect(quit_action, QtCore.SIGNAL("triggered()"), self.quit_app)
self.tray_menu.addAction(quit_action)
# system tray icon
self.tray_icon.setIcon(QtGui.QIcon(":/images/icon.png"))
self.tray_icon.setContextMenu(self.tray_menu)
self.tray_icon.show()
def hide_all(self):
"""
Hide UI.
"""
self.hide_ui()
self.central_widget.hide()
def show_all(self):
""""
Show UI for 10 seconds, then hide it.
"""
print "show"
self.show_ui()
self.central_widget.show()
self.timer_show.start(10000) # 10 seconds, display UI time in ms
self.timer_show.timeout.connect(self.hide_all)
def set_meta_data(self):
"""
Set xml meta data and show message. Check if images are available to download.
:return: None
"""
info_1 = self.media.get_meta(vlc.Meta.NowPlaying) # get xml data
if info_1 != self.info_0:
self.info_0 = info_1
# print "now playing: {0}".format(self.info_0)
self.playlist.set_info(self.playlist.xml_to_dict(self.info_0))
self.playlist.show_msg()
self.my_dict = self.playlist.xml_to_dict(self.info_0)
# print "my_dict: ", self.my_dict
if self.player.is_playing():
try:
if self.my_dict["album_cover"]:
self.save_cover_action.setVisible(True)
else:
self.save_cover_action.setVisible(False)
except TypeError: # parse data delay when play button pressed
pass
try:
if self.my_dict["program_image"]:
self.save_image_action.setVisible(True)
else:
self.save_image_action.setVisible(False)
except TypeError: # parse data delay when play button pressed
pass
def play_pause(self):
"""
Play or pause radio stream.
:return: None
"""
if self.player.is_playing():
# print "paused"
self.timer_show.stop()  # stop the auto-hide timer while paused
self.timer_check.stop()
self.play_pause_action.setText("Paused")
self.player.pause()
self.hide_all()
self.stop_action.setVisible(True)
else:
# print "play"
self.timer_check.start(1000)
self.play_pause_action.setText("Pause")
self.player.play()
self.set_meta_data()
self.playlist.show_msg()
self.stop_action.setVisible(True)
def stop(self):
"""
Stop stream.
:return: None
"""
# print "stop"
self.player.stop()
self.play_pause_action.setText("Play")
self.stop_action.setVisible(False)
self.save_cover_action.setVisible(False)
self.save_image_action.setVisible(False)
self.hide_all()
@staticmethod
def save_picture(url, file_name):
"""
Save album cover and/or program image.
:param url: file url
:param file_name: file name
:return: None
"""
location = QtGui.QFileDialog()
dir_path = QtCore.QDir()
path = dir_path.homePath() + dir_path.separator() + unicode(file_name)
file_path = location.getSaveFileName(location, "Save file as", path)
if file_path:
urllib.urlretrieve(url, unicode(file_path))
@staticmethod
def quit_app():
"""
Close application.
:return: None
"""
# print "quit"
sys.exit()
|
|
from django.core.exceptions import ValidationError
import pytest
from django_super_deduper.merge import MergedModelInstance
from django_super_deduper.models import ModelMeta
from tests.factories import (
ArticleFactory,
EarningsReportFactory,
NewsAgencyFactory,
PlaceFactory,
PublicationFactory,
ReporterFactory,
RestaurantFactory,
WaiterFactory,
)
@pytest.mark.django_db
class MergedModelInstanceTest(object):
def test_merge_basic_model(self):
primary_object = PlaceFactory.create(address=None)
alias_object = PlaceFactory.create()
merged_object = MergedModelInstance.create(primary_object, [alias_object])
assert merged_object.address == alias_object.address
def test_dedupe_basic_model_no_merge(self):
primary_object = PlaceFactory.create(address=None)
alias_object = PlaceFactory.create()
merged_object = MergedModelInstance.create(primary_object, [alias_object], merge_field_values=False)
assert merged_object.address == primary_object.address
def test_merge_model_with_multi_table_inheritance(self):
primary_object = NewsAgencyFactory.create(address=None, website=None)
alias_object = NewsAgencyFactory.create()
merged_object = MergedModelInstance.create(primary_object, [alias_object])
assert merged_object.address == alias_object.address
assert merged_object.website == alias_object.website
def test_merge_model_with_o2o_relationship(self):
primary_object = RestaurantFactory.create(place=None, serves_hot_dogs=True, serves_pizza=False)
alias_object = RestaurantFactory.create(serves_hot_dogs=False, serves_pizza=True)
alias_address = alias_object.place.address
alias_name = alias_object.place.name
merged_object = MergedModelInstance.create(primary_object, [alias_object])
assert merged_object.place.address == alias_address
assert merged_object.place.name == alias_name
assert merged_object.serves_hot_dogs and not merged_object.serves_pizza
def test_dedupe_model_with_o2o_relationship_no_merge(self):
primary_object = RestaurantFactory.create(place=None, serves_hot_dogs=True, serves_pizza=False)
alias_object = RestaurantFactory.create(serves_hot_dogs=False, serves_pizza=True)
merged_object = MergedModelInstance.create(primary_object, [alias_object], merge_field_values=False)
assert not merged_object.place
assert merged_object.serves_hot_dogs and not merged_object.serves_pizza
def test_merge_model_with_o2m_relationship(self):
primary_object = NewsAgencyFactory.create()
alias_object = NewsAgencyFactory.create()
related_object = ReporterFactory.create(news_agency=alias_object)
merged_object = MergedModelInstance.create(primary_object, [alias_object])
assert related_object.news_agency != merged_object
related_object.refresh_from_db()
assert related_object.news_agency == merged_object
def test_merge_model_with_o2m_relationship_and_unique_validation_set_null(self):
primary_object, alias_object = RestaurantFactory.create_batch(2)
waiter = WaiterFactory(restaurant=primary_object)
duplicate_waiter = WaiterFactory(name=waiter.name, restaurant=alias_object)
merged_object = MergedModelInstance.create(primary_object, [alias_object])
waiter.refresh_from_db()
assert waiter.restaurant == merged_object
duplicate_waiter.refresh_from_db()
assert duplicate_waiter.restaurant is None
def test_merge_model_with_o2m_relationship_and_unique_validation_delete(self):
primary_object, alias_object = RestaurantFactory.create_batch(2)
report = EarningsReportFactory(restaurant=primary_object)
other_report = EarningsReportFactory(date=report.date, restaurant=alias_object)
merged_object = MergedModelInstance.create(primary_object, [alias_object])
report.refresh_from_db()
assert report.restaurant == merged_object
with pytest.raises(EarningsReportFactory._meta.model.DoesNotExist):
other_report.refresh_from_db()
def test_merge_model_with_o2m_relationship_and_raise_unique_validation(self):
primary_object, alias_object = RestaurantFactory.create_batch(2)
report = EarningsReportFactory(restaurant=primary_object)
EarningsReportFactory(date=report.date, restaurant=alias_object)
with pytest.raises(ValidationError):
MergedModelInstance.create(primary_object, [alias_object], raise_validation_exception=True)
def test_merge_model_with_m2m_relationship(self):
primary_object = ArticleFactory.create(reporter=None)
related_object = ReporterFactory.create()
alias_object = ArticleFactory.create(number_of_publications=3, reporter=related_object)
assert primary_object.reporter is None
assert primary_object.publications.count() == 0
merged_object = MergedModelInstance.create(primary_object, [alias_object])
assert merged_object.reporter == related_object
assert merged_object.publications.count() == 3
def test_merge_model_with_reverse_m2m_relationship(self):
primary_object = PublicationFactory.create()
alias_object = PublicationFactory.create(number_of_articles=3)
assert primary_object.article_set.count() == 0
merged_object = MergedModelInstance.create(primary_object, [alias_object])
assert merged_object.article_set.count() == 3
def test_merge_different_models(self):
primary_object = ArticleFactory.create()
alias_object = ReporterFactory.create()
with pytest.raises(TypeError):
MergedModelInstance.create(primary_object, [alias_object])
def test_merge_deletes_alias_objects(self):
primary_object = PlaceFactory.create(address=None)
alias_object = PlaceFactory.create()
assert primary_object.address is None
assert PlaceFactory._meta.model.objects.filter(pk=alias_object.pk).exists()
merged_object = MergedModelInstance.create(primary_object, [alias_object], keep_old=False)
assert merged_object.address == alias_object.address
assert not PlaceFactory._meta.model.objects.filter(pk=alias_object.pk).exists()
def test_prevent_self_merge(self):
primary_object = PlaceFactory.create(address=None)
alias_object = primary_object
with pytest.raises(ValueError):
MergedModelInstance.create(primary_object, [alias_object])
def test_o2o_merge_with_audit_trail(self):
primary_object = RestaurantFactory.create(place=None, serves_hot_dogs=True, serves_pizza=False)
alias_objects = RestaurantFactory.create_batch(3)
related_object = set([alias_objects[0].place])
_, audit_trail = MergedModelInstance.create_with_audit_trail(primary_object, alias_objects)
assert set(audit_trail) == related_object
def test_o2m_merge_with_audit_trail(self):
primary_object = NewsAgencyFactory.create()
alias_object = NewsAgencyFactory.create()
related_objects = set(ReporterFactory.create_batch(3, news_agency=alias_object))
_, audit_trail = MergedModelInstance.create_with_audit_trail(primary_object, [alias_object])
assert set(audit_trail) == related_objects
def test_m2m_merge_with_audit_trail(self):
primary_object = ArticleFactory.create(reporter=None)
related_object = ReporterFactory.create()
alias_object = ArticleFactory.create(number_of_publications=3, reporter=related_object)
related_objects = set(alias_object.publications.all())
_, audit_trail = MergedModelInstance.create_with_audit_trail(primary_object, [alias_object])
assert set(audit_trail) == related_objects
def test_reverse_m2m_merge_with_audit_trail(self):
primary_object = PublicationFactory.create()
alias_object = PublicationFactory.create(number_of_articles=3)
related_objects = set(alias_object.article_set.all())
_, audit_trail = MergedModelInstance.create_with_audit_trail(primary_object, [alias_object])
assert set(audit_trail) == related_objects
def test_merge_generic_foreign_keys(self):
primary_object = ArticleFactory()
alias_object = ArticleFactory()
primary_object.tags.create(content_object=primary_object, tag='django')
alias_object.tags.create(content_object=alias_object, tag='python')
merged_object = MergedModelInstance.create(primary_object, [alias_object], keep_old=False)
assert merged_object.tags.count() == 2
@pytest.mark.django_db
class ModelMetaTest(object):
def test_unmanaged_related_fields(self):
instance = RestaurantFactory()
model_meta = ModelMeta(instance)
for field in model_meta.related_fields:
assert field.related_model._meta.managed
|
|
"""
Django settings for bar project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
import os
import dj_database_url
from django.contrib import messages
from django.utils.crypto import get_random_string
DEBUG = os.environ.get('DEBUG') == '1'
TEMPLATE_DEBUG = DEBUG
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
SITE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
BASE_DIR = os.path.abspath(os.path.join(SITE_ROOT, ".."))
# Heroku platform settings.
HEROKU_APP_NAME = "acalfg"
HEROKU_BUILDPACK_URL = "https://github.com/heroku/heroku-buildpack-python.git"
# The name and domain of this site.
SITE_NAME = "acalfg"
SITE_DOMAIN = "acalfg.com"
PREPEND_WWW = False
# Security settings.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
ALLOWED_HOSTS = (
SITE_DOMAIN,
'www.' + SITE_DOMAIN,
"{HEROKU_APP_NAME}.herokuapp.com".format(
HEROKU_APP_NAME = HEROKU_APP_NAME,
),
)
# Database settings.
DATABASES = {
"default": dj_database_url.config(default="postgresql://"),
}
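# dj_database_url reads the DATABASE_URL environment variable; a typical value
# (illustrative only) looks like:
#
#   DATABASE_URL=postgres://user:password@host:5432/dbname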
# Use Amazon S3 for storage for uploaded media files.
DEFAULT_FILE_STORAGE = "storages.backends.s3boto.S3BotoStorage"
# Use Amazon S3 and RequireJS for static files storage.
STATICFILES_STORAGE = "require_s3.storage.OptimizedCachedStaticFilesStorage"
# Amazon S3 settings.
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_STORAGE_BUCKET_NAME")
AWS_AUTO_CREATE_BUCKET = True
AWS_HEADERS = {
"Cache-Control": "public, max-age=86400",
}
AWS_S3_FILE_OVERWRITE = False
AWS_QUERYSTRING_AUTH = False
AWS_S3_SECURE_URLS = True
AWS_REDUCED_REDUNDANCY = False
AWS_IS_GZIPPED = False
STATIC_URL = "https://{bucket_name}.s3.amazonaws.com/".format(
bucket_name = AWS_STORAGE_BUCKET_NAME,
)
# Email settings.
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = os.environ.get("SENDGRID_USERNAME")
EMAIL_HOST_PASSWORD = os.environ.get("SENDGRID_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
SERVER_EMAIL = u"{name} <notifications@{domain}>".format(
name = SITE_NAME,
domain = SITE_DOMAIN,
)
DEFAULT_FROM_EMAIL = SERVER_EMAIL
EMAIL_SUBJECT_PREFIX = "[%s] " % SITE_NAME
# Error reporting settings. Use these to set up automatic error notifications.
ADMINS = ()
MANAGERS = ()
SEND_BROKEN_LINK_EMAILS = False
# Locale settings.
TIME_ZONE = "UTC"
LANGUAGE_CODE = "en-gb"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# A list of additional installed applications.
INSTALLED_APPS = (
"django.contrib.sessions",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admin",
"bootstrap3",
"bootstrap_pagination",
"crispy_forms",
"herokuapp",
"board",
)
# Additional static file locations.
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, "static"),
)
# Dispatch settings.
MIDDLEWARE_CLASSES = (
"django.middleware.gzip.GZipMiddleware",
"herokuapp.middleware.CanonicalDomainMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
)
ROOT_URLCONF = "acalfg.urls"
WSGI_APPLICATION = "acalfg.wsgi.application"
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
MESSAGE_STORAGE = "django.contrib.messages.storage.cookie.CookieStorage"
SITE_ID = 1
# Absolute path to the directory where templates are stored.
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, "templates"),
)
TEMPLATE_LOADERS = (
("django.template.loaders.cached.Loader", (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
)
# Namespace for cache keys, if using a process-shared cache.
CACHE_MIDDLEWARE_KEY_PREFIX = "acalfg"
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
},
# Long cache timeout for staticfiles, since this is used heavily by the optimizing storage.
"staticfiles": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"TIMEOUT": 60 * 60 * 24 * 365,
"LOCATION": "staticfiles",
},
}
# A secret key used for cryptographic algorithms.
SECRET_KEY = os.environ.get("SECRET_KEY", get_random_string(50, "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"))
# The Google Maps API key.
GOOGLE_MAPS_API_KEY = os.environ.get('GOOGLE_MAPS_API_KEY')
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Logging configuration.
LOGGING = {
"version": 1,
# Don't throw away default loggers.
"disable_existing_loggers": False,
"handlers": {
# Redefine console logger to run in production.
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
},
},
"loggers": {
# Redefine django logger to use redefined console logging.
"django": {
"handlers": ["console"],
}
}
}
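# Deployment note (a summary of the os.environ.get() calls above, not new
# behaviour): among the environment variables this settings module reads are
# AWS_SECRET_ACCESS_KEY, AWS_STORAGE_BUCKET_NAME, SENDGRID_USERNAME,
# SENDGRID_PASSWORD, SECRET_KEY and GOOGLE_MAPS_API_KEY. If SECRET_KEY is
# unset it falls back to a fresh random string per process, which, with the
# signed_cookies session backend configured above, invalidates existing
# sessions on every restart.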
|
|
#!/usr/bin/env python3
############################################################################
# Copyright 2017 RIFT.IO Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
class ConfigurationError(Exception):
pass
def configure_waf_haproxy_cp(logger, run_dir, mgmt_ip, haproxy_cp_ip):
sh_file = "{}/waf_set_haproxy_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "echo \"<VirtualHost *:80>\r"
send " AddDefaultCharset UTF-8\r"
send " ProxyPreserveHost On\r"
send " ProxyRequests off\r"
send " ProxyVia Off\r"
send " ProxyPass / http://{haproxy_cp_ip}:5000/\r"
send " ProxyPassReverse / http://{haproxy_cp_ip}:5000/\r"
send " </VirtualHost>\" > /etc/httpd/conf.d/waf_proxy.conf\r"
expect "]# "
send "echo \"<IfModule mod_security2.c>\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/modsecurity_crs_10_setup.conf\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/base_rules/*.conf\r\r"
send " SecRuleEngine On\r"
send " SecRequestBodyAccess On\r"
send " SecResponseBodyAccess On\r"
send " SecDebugLog /var/log/httpd/modsec-debug.log\r"
send " SecDebugLogLevel 3\r"
send "</IfModule>\" > /etc/httpd/conf.d/mod_security.conf\r"
expect "]# "
send "systemctl stop httpd\r"
expect "]# "
send "systemctl start httpd\r"
expect "]# "
'''.format(mgmt_ip=mgmt_ip, haproxy_cp_ip=haproxy_cp_ip))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name):
sh_file = "{}/haproxy_add_waf_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "grep \"server {waf_server_name} {waf_cp_ip}\" /etc/haproxy/haproxy.cfg && echo \"Already configured\" && exit 0\r"
expect {{
"]$ " {{ exit }}
"]# "
}}
send "sed -i \'s/\\(.*WAF list.*\\)/\\1\\n server {waf_server_name} {waf_cp_ip}:80 check/g\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_cp_ip=waf_cp_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name):
sh_file = "{}/haproxy_remove_httpd_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "sed -i \'/server {waf_server_name}/d\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy remove waf config failed: {}".format(rc))
def main(argv=sys.argv[1:]):
try:
parser = argparse.ArgumentParser()
parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
args = parser.parse_args()
run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
if not os.path.exists(run_dir):
os.makedirs(run_dir)
log_file = "{}/rift_waf_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logging.basicConfig(filename=log_file, level=logging.DEBUG)
logger = logging.getLogger()
ch = logging.StreamHandler()
if args.verbose:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
except Exception as e:
print("Got exception:{}".format(e))
raise
try:
dry_run = args.dry_run
yaml_str = args.yaml_cfg_file.read()
logger.debug("Input YAML file: %s", yaml_str)
yaml_cfg = yaml.load(yaml_str)
logger.debug("Input YAML cfg: %s", yaml_cfg)
# Check if this is post scale out trigger
def find_cp_ip(vnfr_list, vnfd_name, cp_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
for cp in vnfr['connection_points']:
logger.debug("Connection point: %s", format(cp))
if cp_name in cp['name']:
return cp['ip_address']
raise ValueError("Could not find vnfd %s connection point %s", vnfd_name, cp_name)
def find_mgmt_ip(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr['rw_mgmt_ip']
raise ValueError("Could not find vnfd %s mgmt ip", vnfd_name)
def find_vnfr(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr
raise ValueError("Could not find vnfd %s", vnfd_name)
haproxy_cp_ip = find_cp_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd", "cp0")
haproxy_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd")
waf_cp_ip = find_cp_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd", "cp0")
waf_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
waf_vnfr = find_vnfr(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
# HAProxy wants to use a name without .'s
waf_server_name = waf_vnfr["name"].replace(".", "__")
if yaml_cfg['trigger'] == 'post_scale_out':
logger.debug("Sleeping for 60 seconds to give VNFD mgmt VM a chance to boot up")
time.sleep(60)
configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name)
configure_waf_haproxy_cp(logger, run_dir, waf_mgmt_ip, haproxy_cp_ip)
elif yaml_cfg['trigger'] == 'pre_scale_in':
configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name)
else:
raise ValueError("Unexpected trigger {}".format(yaml_cfg['trigger']))
except Exception as e:
logger.exception(e)
raise
if __name__ == "__main__":
main()
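# For reference, the yaml_cfg_file consumed by main() is expected to look
# roughly like the sketch below (the key names are taken from the lookups
# above; the VNFR names and addresses are placeholders):
#
#   trigger: post_scale_out            # or pre_scale_in
#   vnfrs_others:
#     - name: ns1__haproxy_vnfd__1
#       rw_mgmt_ip: 10.0.0.10
#       connection_points:
#         - name: cp0
#           ip_address: 10.0.1.10
#   vnfrs_in_group:
#     - name: ns1__waf_vnfd__1
#       rw_mgmt_ip: 10.0.0.20
#       connection_points:
#         - name: cp0
#           ip_address: 10.0.1.20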
|
|
from enum import Enum
import math
import random
import numpy as np
###############
# FUNCTIONS #
###############
N_FUNS = 15
SUM = 0
SUB = 1
MUL = 2
DIV = 3
EQ = 4
GRT = 5
LRT = 6
ZER = 7
EXP = 8
LOG = 9
ABS = 10
MIN = 11
MAX = 12
POW = 13
AFF = 14
funs_names = {SUM: '+',
SUB: '-',
MUL: '*',
DIV: '/',
ZER: 'ZER',
EQ: '==',
GRT: '>',
LRT: '<',
EXP: 'EXP',
LOG: 'LOG',
ABS: 'ABS',
MIN: 'MIN',
MAX: 'MAX',
AFF: 'AFF',
POW: '^'}
names_funs = {}
for fn, name in funs_names.items():
names_funs[name] = fn
def str2fun(st):
if st in names_funs:
return names_funs[st]
return None
def fun2str(func):
if func in funs_names:
return funs_names[func]
return None
def fun_cond_pos(func):
if func == ZER or func == AFF:
return 1
elif func == EQ or func == GRT or func == LRT:
return 2
else:
return -1
def fun_arity(func):
if func in {EXP, LOG, ABS}:
return 1
elif func in {SUM, SUB, MUL, DIV, MIN, MAX, POW}:
return 2
elif func in {ZER, AFF}:
return 3
elif func in {EQ, GRT, LRT}:
return 4
# this should not happen
return 0
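# For the conditional primitives the extra parameters are the branch
# sub-expressions: EQ, GRT and LRT take the two values being compared plus a
# "then" and an "else" branch (fun_cond_pos == 2), while ZER and AFF take one
# condition argument plus two branches (fun_cond_pos == 1). Prog.eval() below
# uses condpos/stoppos so that only the selected branch is evaluated.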
###################
# PROGRAM NODES #
###################
class NodeType(Enum):
FUN = 0
VAR = 1
VAL = 2
class NodeDynStatus(Enum):
UNUSED = 0
CONSTANT = 1
DYNAMIC = 2
def create_val(val, prog, parent):
node = Node(prog, parent)
node.type = NodeType.VAL
node.val = val
return node
def create_var(var, prog, parent):
node = Node(prog, parent)
node.type = NodeType.VAR
node.var = var
return node
def create_fun(fun, prog, parent):
node = Node(prog, parent)
node.type = NodeType.FUN
node.fun = fun
node.condpos = fun_cond_pos(fun)
node.stoppos = fun_arity(fun)
return node
def create_random_node_tree(prog, prob_term, parent, max_depth, grow, depth):
    p = random.random()
    if ((not grow) or p > prob_term) and depth < max_depth:
        fun = random.randrange(N_FUNS)
        node = create_fun(fun, prog, parent)
        for i in range(node.arity()):
            node.params.append(create_random_node_tree(prog, prob_term, node,
                                                       max_depth, grow,
                                                       depth + 1))
else:
if random.randrange(2) == 0 and prog.varcount > 0:
var = random.randrange(prog.varcount)
node = create_var(var, prog, parent)
else:
r = random.randrange(10)
if r == 0:
val = 0.
elif r > 5:
val = random.randrange(10)
else:
val = random.random()
node = create_val(val, prog, parent)
return node
class Node(object):
def __init__(self, prog, parent):
self.prog = prog
self.parent = parent
self.params = []
self.type = 0
self.val = 0.
self.var = 0
self.fun = 0
self.curval = 0.
self.curpos = 0
self.condpos = -1
self.stoppos = 0
self.branching = 0
self.dyn_status = NodeDynStatus.UNUSED
def clone(self, prog, parent):
if self.type == NodeType.VAL:
cnode = create_val(self.val, prog, parent)
elif self.type == NodeType.VAR:
cnode = create_var(self.var, prog, parent)
else:
cnode = create_fun(self.fun, prog, parent)
cnode.curval = self.curval
cnode.branching = self.branching
cnode.dyn_status = self.dyn_status
for param in self.params:
cnode.params.append(param.clone(prog, cnode))
return cnode
def arity(self):
if self.type == NodeType.FUN:
return fun_arity(self.fun)
else:
return 0
def size(self):
s = 1
for param in self.params:
s += param.size()
return s
def node_by_pos(self, pos):
if pos == 0:
return self
cur_pos = 1
for i in range(len(self.params)):
param = self.params[i]
s = param.size()
if pos < cur_pos + s:
return param.node_by_pos(pos - cur_pos)
cur_pos += s
return None
def branching_distance(self, node):
distance = 0
if self.branching != node.branching:
distance += 1
# TODO: check both have same number of params!
for i in range(len(self.params)):
            distance += self.params[i].branching_distance(node.params[i])
return distance
def clear_branching(self):
self.branching = -1
for param in self.params:
param.clear_branching()
def __str__(self):
if self.type == NodeType.VAL:
return str(self.val)
elif self.type == NodeType.VAR:
return '${}'.format(self.prog.var_names[self.var])
elif self.type == NodeType.FUN:
return fun2str(self.fun)
else:
return '???'
##############
# PROGRAMS #
##############
def token_start(prog_str, pos):
curpos = pos
curchar = prog_str[curpos]
while curchar in {' ', '\n', '\t', '\r', ')', '(', 0}:
curpos += 1
curchar = prog_str[curpos]
return curpos
def token_end(prog_str, pos):
curpos = pos
curchar = prog_str[curpos]
while curchar not in {' ', '\n', '\t', '\r', ')', '(', 0}:
curpos += 1
if curpos >= len(prog_str):
return curpos
curchar = prog_str[curpos]
return curpos
def parse(prog_str, var_names, prog=None, parent=None):
if prog is None:
prog = Prog(var_names)
start = token_start(prog_str, prog.parse_pos)
end = token_end(prog_str, start)
token = prog_str[start:end]
try:
val = float(token)
node = create_val(val, prog, parent)
except ValueError:
if token[0] == '$':
var = prog.variable_indices[token[1:]]
node = create_var(var, prog, parent)
else:
fun = str2fun(token)
node = create_fun(fun, prog, parent)
prog.parse_pos = end
for i in range(node.arity()):
            parse(prog_str, var_names, prog, node)
param = prog.root
node.params.append(param)
prog.root = node
return prog
prog.parse_pos = end
prog.root = node
return prog
def load(var_names, file_path):
with open(file_path) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
prog_str = ''
for line in lines:
if len(line) > 0 and line[0] != '#':
prog_str += line
return parse(prog_str, var_names)
def create_random(var_names, prob_term=.4, depth_low_limit=2,
depth_high_limit=5, grow=None):
if grow is None:
grow = random.randrange(2) == 0
max_depth = (depth_low_limit +
random.randrange(depth_high_limit - depth_low_limit))
prog = Prog(var_names)
prog.root = create_random_node_tree(prog, prob_term, None, max_depth, grow,
0)
return prog
class Prog(object):
def __init__(self, var_names):
self.varcount = len(var_names)
self.vars = np.zeros(self.varcount)
self.root = None
self.var_names = var_names
self.variable_indices = {}
for i in range(self.varcount):
self.variable_indices[var_names[i]] = i
self.parse_pos = 0
def clone(self):
cprog = Prog(self.var_names)
if self.root is not None:
cprog.root = self.root.clone(cprog, None)
return cprog
def eval(self):
curnode = self.root
curnode.curpos = -1
val = 0.
while curnode is not None:
curnode.curpos += 1
if curnode.curpos < curnode.stoppos:
if curnode.curpos == curnode.condpos:
if curnode.fun == EQ:
if (curnode.params[0].curval ==
curnode.params[1].curval):
curnode.stoppos = 3
else:
curnode.stoppos = 4
curnode.curpos += 1
elif curnode.fun == GRT:
if curnode.params[0].curval > curnode.params[1].curval:
curnode.stoppos = 3
else:
curnode.stoppos = 4
curnode.curpos += 1
elif curnode.fun == LRT:
if curnode.params[0].curval < curnode.params[1].curval:
curnode.stoppos = 3
else:
curnode.stoppos = 4
curnode.curpos += 1
elif curnode.fun == ZER:
if curnode.params[0].curval == 0:
curnode.stoppos = 2
else:
curnode.stoppos = 3
curnode.curpos += 1
elif curnode.fun == AFF:
g = round(curnode.params[0].curval)
id1 = round(self.vars[0])
id2 = round(self.vars[1])
if (g == 0) or ((id1 % g) == (id2 % g)):
curnode.stoppos = 2
else:
curnode.stoppos = 3
curnode.curpos += 1
# update branching info
if curnode.branching < 0:
curnode.branching = curnode.stoppos
elif curnode.branching != curnode.stoppos:
curnode.branching = 0
curnode = curnode.params[curnode.curpos]
curnode.curpos = -1
else:
if curnode.type == NodeType.FUN:
if curnode.fun == SUM:
val = (curnode.params[0].curval +
curnode.params[1].curval)
elif curnode.fun == SUB:
val = (curnode.params[0].curval -
curnode.params[1].curval)
elif curnode.fun == MUL:
val = (curnode.params[0].curval *
curnode.params[1].curval)
elif curnode.fun == DIV:
if curnode.params[1].curval == 0:
val = 0
else:
val = (curnode.params[0].curval /
curnode.params[1].curval)
elif curnode.fun == MIN:
val = curnode.params[0].curval
if curnode.params[1].curval < val:
val = curnode.params[1].curval
elif curnode.fun == MAX:
val = curnode.params[0].curval
if curnode.params[1].curval > val:
val = curnode.params[1].curval
elif curnode.fun == EXP:
try:
val = math.exp(curnode.params[0].curval)
except OverflowError:
# TODO: not sure if best solution, but using
# a very large float could lead to more overflows
val = 0
elif curnode.fun == LOG:
if curnode.params[0].curval <= 0:
val = 0
else:
val = math.log(curnode.params[0].curval)
elif curnode.fun == ABS:
val = abs(curnode.params[0].curval)
elif curnode.fun == POW:
try:
val = math.pow(curnode.params[0].curval,
curnode.params[1].curval)
except OverflowError:
# TODO: not sure if best solution, but using
# a very large float could lead to more overflows
val = 0
except ValueError:
val = 0
elif curnode.fun in {EQ, GRT, LRT, ZER, AFF}:
val = curnode.params[curnode.stoppos - 1].curval
elif curnode.type == NodeType.VAR:
val = self.vars[curnode.var]
elif curnode.type == NodeType.VAL:
val = curnode.val
# update dynamic status
if curnode.dyn_status == NodeDynStatus.UNUSED:
curnode.dyn_status = NodeDynStatus.CONSTANT
elif curnode.dyn_status == NodeDynStatus.CONSTANT:
if curnode.curval != val:
curnode.dyn_status = NodeDynStatus.DYNAMIC
# update and move to next node
curnode.curval = val
curnode = curnode.parent
return val
def write(self, file_path):
with open(file_path, 'w') as f:
f.write(str(self))
def size(self):
return self.root.size()
def node_by_pos(self, pos):
return self.root.node_by_pos(pos)
def recombine(self, parent2):
if random.randrange(2) == 0:
parent_a = parent2.clone()
parent_b = self.clone()
else:
parent_b = parent2.clone()
parent_a = self.clone()
child = parent_a.clone()
size1 = parent_a.size()
size2 = parent_b.size()
pos1 = random.randrange(size1)
pos2 = random.randrange(size2)
point1 = child.node_by_pos(pos1)
point2 = parent_b.node_by_pos(pos2)
point1parent = point1.parent
parampos = 0
# remove sub-tree from child
# find point1 position in it's parent's param array
if point1parent is not None:
for i in range(point1parent.arity()):
if point1parent.params[i] == point1:
parampos = i
# copy sub-tree from parent 2 to parent 1
point2clone = point2.clone(child, point1parent)
if point1parent is not None:
point1parent.params[parampos] = point2clone
else:
child.root = point2clone
return child
def clear_branching(self):
        self.root.clear_branching()
def branching_distance(self, prg):
return self.root.branching_distance(prg.root)
def compare_branching(self, prg):
return self.branching_distance(prg) == 0
def dyn_pruning(self, node=None, parent=None, param_pos=-1):
if node is None:
node = self.root
else:
# nodes with constant value
if node.dyn_status == NodeDynStatus.CONSTANT:
                parent.params[param_pos] = create_val(node.curval, self, parent)
# conditions with constant branching
if node.condpos > 0:
branch1 = node.params[node.condpos]
branch2 = node.params[node.condpos + 1]
branch = -1
if branch1.dyn_status == NodeDynStatus.UNUSED:
branch = node.condpos + 1
elif branch2.dyn_status == NodeDynStatus.UNUSED:
branch = node.condpos
if branch > 0:
node.params[branch].branching = node.branching
node.params[branch].dyn_status = node.dyn_status
                    parent.params[param_pos] = node.params[branch]
for i in range(len(node.params)):
self.dyn_pruning(node.params[i], node, i)
def build_str(self, node, indent, cur_str):
out = cur_str
ind = indent
if node.arity() > 0:
if node.parent is not None:
out = '{}\n'.format(out)
out = '{}{}('.format(out, ' ' * indent)
ind += 1
out = '{}{}'.format(out, node)
for param in node.params:
out = '{} '.format(out)
out = self.build_str(param, ind, out)
if node.arity() > 0:
out = '{})'.format(out)
return out
def __str__(self):
return self.build_str(self.root, 0, '')
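# A minimal usage sketch of the API above (illustrative only): programs use a
# prefix notation in which '$name' refers to a variable declared in var_names.
if __name__ == '__main__':
    p = parse('(+ $x 2)', ['x'])
    p.vars[0] = 3.0
    print(p)          # pretty-printed program tree
    print(p.eval())   # 3.0 + 2.0 -> 5.0
    q = create_random(['x', 'y'])
    print(q)          # a randomly generated program over $x and $y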
|
|
"""
This module includes the cost-sensitive logistic regression method.
"""
# Authors: Alejandro Correa Bahnsen <[email protected]>
# License: BSD 3 clause
import numpy as np
import math
from scipy.optimize import minimize
from sklearn.base import BaseEstimator
# from sklearn.linear_model.logistic import _intercept_dot
from pyea import GeneticAlgorithmOptimizer
from ..metrics import cost_loss
# Not in sklearn 0.15; it is in 0.16-git.
# TODO: replace once sklearn 0.16 is released.
# The version in sklearn 0.16 returns yz instead of z, so the impact on the
# code below should be addressed before making that change.
def _intercept_dot(w, X):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = np.dot(X, w) + c
return w, c, z
def _sigmoid(z):
""" Private function that calculate the sigmoid function """
return 1 / (1 + np.exp(-z))
def _logistic_cost_loss_i(w, X, y, cost_mat, alpha):
n_samples = X.shape[0]
w, c, z = _intercept_dot(w, X)
y_prob = _sigmoid(z)
out = cost_loss(y, y_prob, cost_mat) / n_samples
out += .5 * alpha * np.dot(w, w)
return out
def _logistic_cost_loss(w, X, y, cost_mat, alpha):
"""Computes the logistic loss.
Parameters
----------
    w : array-like, shape (n_w, n_features) or (n_w, n_features + 1)
        Coefficient vector or matrix of coefficients.
X : array-like, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
cost_mat : array-like of shape = [n_samples, 4]
        Cost matrix of the classification problem, where the columns represent
        the costs of false positives, false negatives, true positives and
        true negatives, for each example.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
Returns
-------
out : float
Logistic loss.
"""
if w.shape[0] == w.size:
# Only evaluating one w
return _logistic_cost_loss_i(w, X, y, cost_mat, alpha)
else:
# Evaluating a set of w
n_w = w.shape[0]
out = np.zeros(n_w)
for i in range(n_w):
out[i] = _logistic_cost_loss_i(w[i], X, y, cost_mat, alpha)
return out
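# Toy illustration (made-up numbers) of how the loss above is called: when w
# has n_features + 1 entries the last one is treated as the intercept, and
# cost_mat carries one [C_FP, C_FN, C_TP, C_TN] row per sample.
#
#   w = np.array([0.5, -0.2, 0.1])              # 2 features + intercept
#   X = np.array([[1.0, 2.0], [0.0, 1.0]])
#   y = np.array([1, 0])
#   cost_mat = np.array([[1.0, 4.0, 0.0, 0.0],
#                        [1.0, 4.0, 0.0, 0.0]])
#   _logistic_cost_loss(w, X, y, cost_mat, alpha=1.0)   # -> scalar loss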
class CostSensitiveLogisticRegression(BaseEstimator):
"""A example-dependent cost-sensitive Logistic Regression classifier.
Parameters
----------
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
max_iter : int
Useful only for the ga and bfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'ga', 'bfgs'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
verbose : int, optional (default=0)
Controls the verbosity of the optimization process.
Attributes
----------
`coef_` : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
`intercept_` : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
See also
--------
sklearn.tree.DecisionTreeClassifier
References
----------
    .. [1] A. Correa Bahnsen, D. Aouada and B. Ottersten,
           `"Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring" <http://albahnsen.com/files/Example-Dependent%20Cost-Sensitive%20Logistic%20Regression%20for%20Credit%20Scoring_publish.pdf>`__,
           in Proceedings of the International Conference on Machine Learning and Applications, 2014.
Examples
--------
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.cross_validation import train_test_split
>>> from costcla.datasets import load_creditscoring2
>>> from costcla.models import CostSensitiveLogisticRegression
>>> from costcla.metrics import savings_score
>>> data = load_creditscoring2()
>>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
>>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
>>> y_pred_test_lr = LogisticRegression(random_state=0).fit(X_train, y_train).predict(X_test)
>>> f = CostSensitiveLogisticRegression()
>>> f.fit(X_train, y_train, cost_mat_train)
>>> y_pred_test_cslr = f.predict(X_test)
>>> # Savings using Logistic Regression
>>> print(savings_score(y_test, y_pred_test_lr, cost_mat_test))
0.00283419465107
>>> # Savings using Cost Sensitive Logistic Regression
>>> print(savings_score(y_test, y_pred_test_cslr, cost_mat_test))
0.142872237978
"""
def __init__(self,
C=1.0,
fit_intercept=True,
max_iter=100,
random_state=None,
solver='ga',
tol=1e-4,
verbose=0):
self.C = C
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.random_state = random_state
self.solver = solver
self.tol = tol
self.coef_ = None
self.intercept_ = 0.
self.verbose = verbose
def fit(self, X, y, cost_mat):
""" Build a example-dependent cost-sensitive logistic regression from the training set (X, y, cost_mat)
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y : array indicator matrix
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4]
            Cost matrix of the classification problem, where the columns
            represent the costs of false positives, false negatives,
            true positives and true negatives, for each example.
Returns
-------
self : object
Returns self.
"""
#TODO: Check input
n_features = X.shape[1]
if self.fit_intercept:
w0 = np.zeros(n_features + 1)
else:
w0 = np.zeros(n_features)
if self.solver == 'ga':
#TODO: add n_jobs
res = GeneticAlgorithmOptimizer(_logistic_cost_loss,
w0.shape[0],
iters=self.max_iter,
type_='cont',
n_chromosomes=100,
per_mutations=0.25,
n_elite=10,
fargs=(X, y, cost_mat, 1. / self.C),
range_=(-5, 5),
n_jobs=1,
verbose=self.verbose)
res.fit()
elif self.solver == 'bfgs':
if self.verbose > 0:
disp = True
else:
disp = False
res = minimize(_logistic_cost_loss,
w0,
method='BFGS',
args=(X, y, cost_mat, 1. / self.C),
tol=self.tol,
options={'maxiter': self.max_iter, 'disp': disp})
if self.fit_intercept:
self.coef_ = res.x[:-1]
self.intercept_ = res.x[-1]
else:
self.coef_ = res.x
def predict_proba(self, X):
"""Probability estimates.
        The returned estimates are ordered by class label: column 0 is the
        negative class and column 1 the positive class.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, 2]
Returns the probability of the sample for each class in the model.
"""
y_prob = np.zeros((X.shape[0], 2))
y_prob[:, 1] = _sigmoid(np.dot(X, self.coef_) + self.intercept_)
y_prob[:, 0] = 1 - y_prob[:, 1]
return y_prob
def predict(self, X, cut_point=0.5):
"""Predicted class.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples]
            Returns the predicted class of each sample.
"""
return np.floor(self.predict_proba(X)[:, 1] + (1 - cut_point))
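# Worked example of the thresholding in predict() above: with the default
# cut_point of 0.5, a positive-class probability of 0.7 gives
# floor(0.7 + 0.5) = 1 and 0.3 gives floor(0.3 + 0.5) = 0; raising cut_point
# moves the decision boundary so a higher probability is required for the
# positive class.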
|
|
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.test import Client
from django.test.utils import override_settings
from funfactory.helpers import urlparams
from mock import patch
from nose.tools import ok_, eq_
from mozillians.common.helpers import redirect
from mozillians.common.tests import TestCase
from mozillians.users.managers import PUBLIC, MOZILLIANS, EMPLOYEES, PRIVILEGED
from mozillians.users.tests import UserFactory
class ViewProfileTests(TestCase):
@patch('mozillians.phonebook.views.messages.warning')
@patch('mozillians.phonebook.views.login_required', wraps=login_required)
def test_view_profile_no_public_anonymous(self, login_required_mock,
warning_mock):
lookup_user = UserFactory.create()
client = Client()
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
client.get(url, follow=True)
ok_(warning_mock.called)
ok_(login_required_mock.called)
@patch('mozillians.phonebook.views.messages.error')
@patch('mozillians.phonebook.views.redirect', wraps=redirect)
def test_view_profile_no_public_unvouched(self, redirect_mock, error_mock):
lookup_user = UserFactory.create()
user = UserFactory.create(vouched=False)
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
client.get(url, follow=True)
ok_(redirect_mock.called)
ok_(error_mock.called)
def test_view_profile_no_public_vouched(self):
lookup_user = UserFactory.create()
user = UserFactory.create()
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
def test_view_vouched_profile_public_anonymous(self):
lookup_user = UserFactory.create(userprofile={'privacy_full_name': PUBLIC})
client = Client()
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
ok_('vouch_form' not in response.context)
def test_view_vouched_profile_public_unvouched(self):
lookup_user = UserFactory.create(userprofile={'privacy_full_name': PUBLIC})
user = UserFactory.create(vouched=False)
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
ok_('vouch_form' not in response.context)
@override_settings(CAN_VOUCH_THRESHOLD=1)
def test_view_vouched_profile_public_vouched(self):
lookup_user = UserFactory.create(userprofile={'privacy_full_name': PUBLIC})
user = UserFactory.create()
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, MOZILLIANS)
ok_('vouch_form' in response.context)
def test_view_unvouched_profile_public_anonymous(self):
lookup_user = UserFactory.create(vouched=False,
userprofile={'privacy_full_name': PUBLIC})
client = Client()
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
ok_('vouch_form' not in response.context)
def test_view_unvouched_profile_public_unvouched(self):
lookup_user = UserFactory.create(vouched=False,
userprofile={'privacy_full_name': PUBLIC})
user = UserFactory.create(vouched=False)
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
ok_('vouch_form' not in response.context)
@override_settings(CAN_VOUCH_THRESHOLD=1)
def test_view_unvouched_profile_public_vouched(self):
lookup_user = UserFactory.create(vouched=False,
userprofile={'privacy_full_name': PUBLIC})
user = UserFactory.create()
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, MOZILLIANS)
ok_('vouch_form' in response.context)
def test_view_profile_mine_unvouched(self):
user = UserFactory.create(vouched=False)
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, None)
eq_(response.context['privacy_mode'], 'myself')
def test_view_profile_mine_vouched(self):
user = UserFactory.create()
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, None)
eq_(response.context['privacy_mode'], 'myself')
def test_view_profile_mine_as_anonymous(self):
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
url = urlparams(url, view_as='anonymous')
with self.login(user) as client:
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
eq_(response.context['privacy_mode'], 'anonymous')
def test_view_profile_mine_as_mozillian(self):
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
url = urlparams(url, view_as='mozillian')
with self.login(user) as client:
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, MOZILLIANS)
eq_(response.context['privacy_mode'], 'mozillian')
def test_view_profile_mine_as_employee(self):
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
url = urlparams(url, view_as='employee')
with self.login(user) as client:
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, EMPLOYEES)
eq_(response.context['privacy_mode'], 'employee')
def test_view_profile_mine_as_privileged(self):
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
url = urlparams(url, view_as='privileged')
with self.login(user) as client:
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, PRIVILEGED)
eq_(response.context['privacy_mode'], 'privileged')
def test_view_profile_waiting_for_vouch_unvouched(self):
unvouched_user = UserFactory.create(vouched=False)
user = UserFactory.create(vouched=False)
url = reverse('phonebook:profile_view',
kwargs={'username': unvouched_user.username})
with self.login(user) as client:
response = client.get(url, follow=True)
ok_('vouch_form' not in response.context)
@override_settings(CAN_VOUCH_THRESHOLD=1)
def test_view_profile_waiting_for_vouch_vouched(self):
unvouched_user = UserFactory.create(vouched=False)
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': unvouched_user.username})
with self.login(user) as client:
response = client.get(url, follow=True)
ok_('vouch_form' in response.context)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit Tests for classes in dumping_wrapper.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import tempfile
import threading
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
@test_util.run_v1_only("b/120545219")
class DumpingDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.session_root = tempfile.mkdtemp()
self.v = variables.VariableV1(10.0, dtype=dtypes.float32, name="v")
self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")
self.ph = array_ops.placeholder(dtypes.float32, shape=(), name="ph")
self.inc_w_ph = state_ops.assign_add(self.v, self.ph, name="inc_w_ph")
self.sess = session.Session()
self.sess.run(self.v.initializer)
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self.session_root):
file_io.delete_recursively(self.session_root)
def _assert_correct_run_subdir_naming(self, run_subdir):
self.assertStartsWith(run_subdir, "run_")
self.assertEqual(2, run_subdir.count("_"))
self.assertGreater(int(run_subdir.split("_")[1]), 0)
def testConstructWrapperWithExistingNonEmptyRootDirRaisesException(self):
dir_path = os.path.join(self.session_root, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "session_root path points to a non-empty directory"):
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=self.session_root, log_usage=False)
def testConstructWrapperWithExistingFileDumpRootRaisesException(self):
file_path = os.path.join(self.session_root, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(gfile.Exists(file_path))
self.assertFalse(gfile.IsDirectory(file_path))
with self.assertRaisesRegexp(ValueError,
"session_root path points to a file"):
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=file_path, log_usage=False)
def testConstructWrapperWithNonexistentSessionRootCreatesDirectory(self):
new_dir_path = os.path.join(tempfile.mkdtemp(), "new_dir")
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=new_dir_path, log_usage=False)
self.assertTrue(gfile.IsDirectory(new_dir_path))
# Cleanup.
gfile.DeleteRecursively(new_dir_path)
def testDumpingOnASingleRunWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingOnASingleRunWorksWithRelativePathForDebugDumpDir(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
cwd = os.getcwd()
try:
os.chdir(self.session_root)
dump = debug_data.DebugDumpDir(
os.path.relpath(dump_dirs[0], self.session_root))
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
finally:
os.chdir(cwd)
def testDumpingOnASingleRunWithFeedDictWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
feed_dict = {self.ph: 3.2}
sess.run(self.inc_w_ph, feed_dict=feed_dict)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_w_ph), dump.run_fetches_info)
self.assertEqual(repr(feed_dict.keys()), dump.run_feed_keys_info)
def testDumpingOnMultipleRunsWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
for _ in range(3):
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(3, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testUsingNonCallableAsWatchFnRaisesTypeError(self):
bad_watch_fn = "bad_watch_fn"
with self.assertRaisesRegexp(TypeError, "watch_fn is not callable"):
dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=bad_watch_fn,
log_usage=False)
def testDumpingWithLegacyWatchFnOnFetchesWorks(self):
"""Use a watch_fn that returns different whitelists for different runs."""
def watch_fn(fetches, feeds):
del feeds
# A watch_fn that picks fetch name.
if fetches.name == "inc_v:0":
# If inc_v, watch everything.
return "DebugIdentity", r".*", r".*"
else:
# If dec_v, watch nothing.
return "DebugIdentity", r"$^", r"$^"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
for _ in range(3):
sess.run(self.inc_v)
sess.run(self.dec_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(6, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertGreater(dump.size, 0)
self.assertAllClose([10.0 - 0.4 * (i / 2)],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.dec_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingWithLegacyWatchFnWithNonDefaultDebugOpsWorks(self):
"""Use a watch_fn that specifies non-default debug ops."""
def watch_fn(fetches, feeds):
del fetches, feeds
return ["DebugIdentity", "DebugNumericSummary"], r".*", r".*"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(14,
len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
def testDumpingWithWatchFnWithNonDefaultDebugOpsWorks(self):
"""Use a watch_fn that specifies non-default debug ops."""
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity", "DebugNumericSummary"],
node_name_regex_whitelist=r"^v.*",
op_type_regex_whitelist=r".*",
tensor_dtype_regex_whitelist=".*_ref")
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(14,
len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
dumped_nodes = [dump.node_name for dump in dump.dumped_tensor_data]
self.assertNotIn("inc_v", dumped_nodes)
self.assertNotIn("delta", dumped_nodes)
def testDumpingDebugHookWithoutWatchFnWorks(self):
dumping_hook = hooks.DumpingDebugHook(self.session_root, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingDebugHookWithStatefulWatchFnWorks(self):
watch_fn_state = {"run_counter": 0}
def counting_watch_fn(fetches, feed_dict):
del fetches, feed_dict
watch_fn_state["run_counter"] += 1
if watch_fn_state["run_counter"] % 2 == 1:
# If odd-index run (1-based), watch every ref-type tensor.
return framework.WatchOptions(
debug_ops="DebugIdentity",
tensor_dtype_regex_whitelist=".*_ref")
else:
# If even-index run, watch nothing.
return framework.WatchOptions(
debug_ops="DebugIdentity",
node_name_regex_whitelist=r"^$",
op_type_regex_whitelist=r"^$")
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=counting_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
for _ in range(4):
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(4, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertNotIn("delta",
[datum.node_name for datum in dump.dumped_tensor_data])
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingDebugHookWithStatefulLegacyWatchFnWorks(self):
watch_fn_state = {"run_counter": 0}
def counting_watch_fn(fetches, feed_dict):
del fetches, feed_dict
watch_fn_state["run_counter"] += 1
if watch_fn_state["run_counter"] % 2 == 1:
# If odd-index run (1-based), watch everything.
return "DebugIdentity", r".*", r".*"
else:
# If even-index run, watch nothing.
return "DebugIdentity", r"$^", r"$^"
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=counting_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
for _ in range(4):
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(4, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingFromMultipleThreadsObeysThreadNameFilter(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False,
thread_name_filter=r"MainThread$")
self.assertAllClose(1.0, sess.run(self.delta))
child_thread_result = []
def child_thread_job():
child_thread_result.append(sess.run(self.eta))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
thread.join()
self.assertAllClose([-1.4], child_thread_result)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertEqual(1, dump.size)
self.assertEqual("delta", dump.dumped_tensor_data[0].node_name)
def testDumpingWrapperWithEmptyFetchWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run([])
if __name__ == "__main__":
googletest.main()
|
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import optparse
import os
import pickle
import sys
from troveclient.compat import client
from troveclient.compat.xml import TroveXmlClient
from troveclient.compat import exceptions
from urllib import quote
def methods_of(obj):
"""Get all callable methods of an object that don't start with underscore
returns a list of tuples of the form (method_name, method)"""
result = {}
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result[i] = getattr(obj, i)
return result
def check_for_exceptions(resp, body):
if resp.status in (400, 422, 500):
raise exceptions.from_response(resp, body)
def print_actions(cmd, actions):
"""Print help for the command with list of options and description"""
print ("Available actions for '%s' cmd:") % cmd
for k, v in actions.iteritems():
print "\t%-20s%s" % (k, v.__doc__)
sys.exit(2)
def print_commands(commands):
"""Print the list of available commands and description"""
print "Available commands"
for k, v in commands.iteritems():
print "\t%-20s%s" % (k, v.__doc__)
sys.exit(2)
def limit_url(url, limit=None, marker=None):
if not limit and not marker:
return url
query = []
if marker:
query.append("marker=%s" % marker)
if limit:
query.append("limit=%s" % limit)
query = '?' + '&'.join(query)
return url + query
def quote_user_host(user, host):
quoted = ''
if host:
quoted = quote("%s@%s" % (user, host))
else:
quoted = quote("%s" % user)
return quoted.replace('.', '%2e')
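# For example (illustrative values), quote_user_host('admin', 'db.example.com')
# returns 'admin%40db%2eexample%2ecom': the '@' is percent-encoded by quote()
# and the dots are then rewritten to '%2e'.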
class CliOptions(object):
"""A token object containing the user, apikey and token which
is pickleable."""
APITOKEN = os.path.expanduser("~/.apitoken")
DEFAULT_VALUES = {
'username': None,
'apikey': None,
'tenant_id': None,
'auth_url': None,
'auth_type': 'keystone',
'service_type': 'database',
'service_name': '',
'region': 'RegionOne',
'service_url': None,
'insecure': False,
'verbose': False,
'debug': False,
'token': None,
'xml': None,
}
    def __init__(self, **kwargs):
        for key, value in self.DEFAULT_VALUES.items():
            setattr(self, key, kwargs.get(key, value))
@classmethod
def default(cls):
kwargs = copy.deepcopy(cls.DEFAULT_VALUES)
return cls(**kwargs)
@classmethod
def load_from_file(cls):
try:
with open(cls.APITOKEN, 'rb') as token:
return pickle.load(token)
except IOError:
pass # File probably not found.
except:
print("ERROR: Token file found at %s was corrupt." % cls.APITOKEN)
return cls.default()
@classmethod
def save_from_instance_fields(cls, instance):
apitoken = cls.default()
for key, default_value in cls.DEFAULT_VALUES.items():
final_value = getattr(instance, key, default_value)
setattr(apitoken, key, final_value)
with open(cls.APITOKEN, 'wb') as token:
pickle.dump(apitoken, token, protocol=2)
@classmethod
def create_optparser(cls, load_file):
oparser = optparse.OptionParser(
usage="%prog [options] <cmd> <action> <args>",
version='1.0', conflict_handler='resolve')
if load_file:
file = cls.load_from_file()
else:
file = cls.default()
def add_option(*args, **kwargs):
if len(args) == 1:
name = args[0]
else:
name = args[1]
kwargs['default'] = getattr(file, name, cls.DEFAULT_VALUES[name])
oparser.add_option("--%s" % name, **kwargs)
add_option("verbose", action="store_true",
help="Show equivalent curl statement along "
"with actual HTTP communication.")
add_option("debug", action="store_true",
help="Show the stack trace on errors.")
add_option("auth_url", help="Auth API endpoint URL with port and "
"version. Default: http://localhost:5000/v2.0")
add_option("username", help="Login username")
add_option("apikey", help="Api key")
add_option("tenant_id",
help="Tenant Id associated with the account")
add_option("auth_type",
help="Auth type to support different auth environments, \
Supported values are 'keystone', 'rax'.")
add_option("service_type",
help="Service type is a name associated for the catalog")
add_option("service_name",
help="Service name as provided in the service catalog")
add_option("service_url",
help="Service endpoint to use "
"if the catalog doesn't have one.")
add_option("region", help="Region the service is located in")
add_option("insecure", action="store_true",
help="Run in insecure mode for https endpoints.")
add_option("token", help="Token from a prior login.")
add_option("xml", action="store_true", help="Changes format to XML.")
oparser.add_option("--secure", action="store_false", dest="insecure",
help="Run in insecure mode for https endpoints.")
oparser.add_option("--json", action="store_false", dest="xml",
help="Changes format to JSON.")
oparser.add_option("--terse", action="store_false", dest="verbose",
help="Toggles verbose mode off.")
oparser.add_option("--hide-debug", action="store_false", dest="debug",
help="Toggles debug mode off.")
return oparser
class ArgumentRequired(Exception):
def __init__(self, param):
self.param = param
def __str__(self):
return 'Argument "--%s" required.' % self.param
class ArgumentsRequired(ArgumentRequired):
def __init__(self, *params):
self.params = params
def __str__(self):
returnstring = 'Specify at least one of these arguments: '
for param in self.params:
returnstring = returnstring + '"--%s" ' % param
return returnstring
class CommandsBase(object):
params = []
def __init__(self, parser):
self._parse_options(parser)
def _get_client(self):
"""Creates the all important client object."""
try:
if self.xml:
client_cls = TroveXmlClient
else:
client_cls = client.TroveHTTPClient
if self.verbose:
client.log_to_streamhandler(sys.stdout)
client.RDC_PP = True
return client.Dbaas(self.username, self.apikey, self.tenant_id,
auth_url=self.auth_url,
auth_strategy=self.auth_type,
service_type=self.service_type,
service_name=self.service_name,
region_name=self.region,
service_url=self.service_url,
insecure=self.insecure,
client_cls=client_cls)
except:
if self.debug:
raise
print sys.exc_info()[1]
def _safe_exec(self, func, *args, **kwargs):
if not self.debug:
try:
return func(*args, **kwargs)
except:
print(sys.exc_info()[1])
return None
else:
return func(*args, **kwargs)
@classmethod
def _prepare_parser(cls, parser):
for param in cls.params:
parser.add_option("--%s" % param)
def _parse_options(self, parser):
opts, args = parser.parse_args()
for param in opts.__dict__:
value = getattr(opts, param)
setattr(self, param, value)
def _require(self, *params):
for param in params:
if not hasattr(self, param):
raise ArgumentRequired(param)
if not getattr(self, param):
raise ArgumentRequired(param)
def _require_at_least_one_of(self, *params):
# One or more of params is required to be present.
argument_present = False
for param in params:
if hasattr(self, param):
if getattr(self, param):
argument_present = True
if argument_present is False:
raise ArgumentsRequired(*params)
def _make_list(self, *params):
# Convert the listed params to lists.
for param in params:
raw = getattr(self, param)
if isinstance(raw, list):
                continue  # already a list; nothing to convert for this param
raw = [item.strip() for item in raw.split(',')]
setattr(self, param, raw)
def _pretty_print(self, func, *args, **kwargs):
if self.verbose:
self._safe_exec(func, *args, **kwargs)
return # Skip this, since the verbose stuff will show up anyway.
def wrapped_func():
result = func(*args, **kwargs)
if result:
print(json.dumps(result._info, sort_keys=True, indent=4))
else:
print("OK")
self._safe_exec(wrapped_func)
def _dumps(self, item):
return json.dumps(item, sort_keys=True, indent=4)
def _pretty_list(self, func, *args, **kwargs):
result = self._safe_exec(func, *args, **kwargs)
if self.verbose:
return
if result and len(result) > 0:
for item in result:
print(self._dumps(item._info))
else:
print("OK")
def _pretty_paged(self, func, *args, **kwargs):
try:
limit = self.limit
if limit:
limit = int(limit, 10)
result = func(*args, limit=limit, marker=self.marker, **kwargs)
if self.verbose:
return # Verbose already shows the output, so skip this.
if result and len(result) > 0:
for item in result:
print self._dumps(item._info)
if result.links:
print("Links:")
for link in result.links:
print self._dumps((link))
else:
print("OK")
except:
if self.debug:
raise
print sys.exc_info()[1]
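# Illustrative sketch (added for clarity, not part of the original module): a
# typical subclass declares the options it accepts in `params`, validates them
# with _require(), and prints results with the _pretty_* helpers, e.g.
#
#     class ExampleCommands(AuthedCommandsBase):
#         params = ['instance']
#         def get(self):
#             self._require('instance')
#             self._pretty_print(self.dbaas.instances.get, self.instance)
#
# `ExampleCommands`, the `instance` parameter and `self.dbaas.instances.get`
# are hypothetical names used only for illustration; `self.dbaas` is set up by
# AuthedCommandsBase below.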
class Auth(CommandsBase):
"""Authenticate with your username and api key"""
params = [
'apikey',
'auth_strategy',
'auth_type',
'auth_url',
'options',
'region',
'service_name',
'service_type',
'service_url',
'tenant_id',
'username',
]
def __init__(self, parser):
super(Auth, self).__init__(parser)
self.dbaas = None
def login(self):
"""Login to retrieve an auth token to use for other api calls"""
self._require('username', 'apikey', 'tenant_id', 'auth_url')
try:
self.dbaas = self._get_client()
self.dbaas.authenticate()
self.token = self.dbaas.client.auth_token
self.service_url = self.dbaas.client.service_url
CliOptions.save_from_instance_fields(self)
print("Token aquired! Saving to %s..." % CliOptions.APITOKEN)
print(" service_url = %s" % self.service_url)
print(" token = %s" % self.token)
except:
if self.debug:
raise
print sys.exc_info()[1]
class AuthedCommandsBase(CommandsBase):
"""Commands that work only with an authicated client."""
def __init__(self, parser):
"""Makes sure a token is available somehow and logs in."""
super(AuthedCommandsBase, self).__init__(parser)
try:
self._require('token')
except ArgumentRequired:
if self.debug:
raise
print('No token argument supplied. Use the "auth login" command '
'to log in and get a token.\n')
sys.exit(1)
try:
self._require('service_url')
except ArgumentRequired:
if self.debug:
raise
print('No service_url given.\n')
sys.exit(1)
self.dbaas = self._get_client()
# Actually set the token to avoid a re-auth.
self.dbaas.client.auth_token = self.token
self.dbaas.client.authenticate_with_token(self.token, self.service_url)
class Paginated(object):
""" Pretends to be a list if you iterate over it, but also keeps a
next property you can use to get the next page of data. """
    def __init__(self, items=None, next_marker=None, links=None):
        # Use None defaults so instances never share a mutable default list.
        self.items = items if items is not None else []
        self.next = next_marker
        self.links = links if links is not None else []
def __len__(self):
return len(self.items)
def __iter__(self):
return self.items.__iter__()
def __getitem__(self, key):
return self.items[key]
def __setitem__(self, key, value):
self.items[key] = value
def __delitem__(self, key):
del self.items[key]
def __reversed__(self):
return reversed(self.items)
def __contains__(self, needle):
return needle in self.items
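# Illustrative sketch (added for clarity, not part of the original module):
# Paginated behaves like the wrapped list while also exposing the pagination
# marker and links. The literal values below are arbitrary examples.
def _example_paginated_usage():
    page = Paginated(items=['a', 'b'], next_marker='b', links=[])
    assert len(page) == 2
    assert list(page) == ['a', 'b']
    assert page[0] == 'a'
    assert page.next == 'b'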
|
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#PY25 compatible for GAE.
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Code for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
This code is designed to push the Python interpreter's performance to the
limits.
The basic idea is that at startup time, for every field (i.e. every
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
sizer takes a value of this field's type and computes its byte size. The
encoder takes a writer function and a value. It encodes the value into byte
strings and invokes the writer function to write those strings. Typically the
writer function is the write() method of a cStringIO.
We try to do as much work as possible when constructing the writer and the
sizer rather than when calling them. In particular:
* We copy any needed global functions to local variables, so that we do not need
to do costly global table lookups at runtime.
* Similarly, we try to do any attribute lookups at startup time if possible.
* Every field's tag is encoded to bytes at startup, since it can't change at
runtime.
* Whatever component of the field size we can compute at startup, we do.
* We *avoid* sharing code if doing so would make the code slower and not sharing
does not burden us too much. For example, encoders for repeated fields do
not just call the encoders for singular fields in a loop because this would
add an extra function call overhead for every loop iteration; instead, we
manually inline the single-value encoder into the loop.
* If a Python function lacks a return statement, Python actually generates
instructions to pop the result of the last statement off the stack, push
None onto the stack, and then return that. If we really don't care what
value is returned, then we can save two instructions by returning the
result of the last statement. It looks funny but it helps.
* We assume that type and bounds checking has happened at a higher level.
"""
__author__ = '[email protected] (Kenton Varda)'
import struct
import sys ##PY25
_PY2 = sys.version_info[0] < 3 ##PY25
import wire_format
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
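# Illustrative check (added for clarity, not part of the original module):
# varint sizes grow by one byte for every 7 bits of magnitude.
def _example_varint_sizes():
  assert _VarintSize(0) == 1        # 0x00..0x7f fit in a single byte
  assert _VarintSize(0x7f) == 1
  assert _VarintSize(0x80) == 2     # 128 needs a continuation byte
  assert _VarintSize(300) == 2      # encoded as 0xAC 0x02
  assert _VarintSize(1 << 21) == 4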
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _TagSize(field_number):
"""Returns the number of bytes required to serialize a tag with this field
number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarintSize(wire_format.PackTag(field_number, 0))
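# Illustrative check (added for clarity, not part of the original module):
# a tag is (field_number << 3) | wire_type encoded as a varint, so fields
# 1-15 need one tag byte and fields 16-2047 need two.
def _example_tag_sizes():
  assert _TagSize(1) == 1
  assert _TagSize(15) == 1
  assert _TagSize(16) == 2
  assert _TagSize(2047) == 2
  assert _TagSize(2048) == 3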
# --------------------------------------------------------------------
# In this section we define some generic sizers. Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.
def _SimpleSizer(compute_value_size):
"""A sizer which uses the function compute_value_size to compute the size of
each value. Typically compute_value_size is _VarintSize."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(element)
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(element)
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(value)
return FieldSize
return SpecificSizer
def _ModifiedSizer(compute_value_size, modify_value):
"""Like SimpleSizer, but modify_value is invoked on each value before it is
passed to compute_value_size. modify_value is typically ZigZagEncode."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(modify_value(element))
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(modify_value(element))
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(modify_value(value))
return FieldSize
return SpecificSizer
def _FixedSizer(value_size):
"""Like _SimpleSizer except for a fixed-size field. The input is the size
of one value."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = len(value) * value_size
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
element_size = value_size + tag_size
def RepeatedFieldSize(value):
return len(value) * element_size
return RepeatedFieldSize
else:
field_size = value_size + tag_size
def FieldSize(value):
return field_size
return FieldSize
return SpecificSizer
# ====================================================================
# Here we declare a sizer constructor for each field type. Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)
UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
_SignedVarintSize, wire_format.ZigZagEncode)
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)
BoolSizer = _FixedSizer(1)
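# Illustrative check (added for clarity, not part of the original module):
# each sizer constructor above is called with (field_number, is_repeated,
# is_packed) and returns a sizer that maps a field value to its serialized
# byte size, tag included.
def _example_sizer_usage():
  sizer = Int32Sizer(1, False, False)
  assert sizer(150) == 3           # one tag byte + two varint bytes (0x96 0x01)
  repeated = UInt32Sizer(1, True, False)
  assert repeated([1, 300]) == 5   # (tag + 1 byte) + (tag + 2 bytes)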
def StringSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a string field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element.encode('utf-8'))
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value.encode('utf-8'))
return tag_size + local_VarintSize(l) + l
return FieldSize
def BytesSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a bytes field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element)
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value)
return tag_size + local_VarintSize(l) + l
return FieldSize
def GroupSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a group field."""
tag_size = _TagSize(field_number) * 2
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += element.ByteSize()
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + value.ByteSize()
return FieldSize
def MessageSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a message field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = element.ByteSize()
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = value.ByteSize()
return tag_size + local_VarintSize(l) + l
return FieldSize
# --------------------------------------------------------------------
# MessageSet is special.
def MessageSetItemSizer(field_number):
"""Returns a sizer for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
_TagSize(3))
local_VarintSize = _VarintSize
def FieldSize(value):
l = value.ByteSize()
return static_size + local_VarintSize(l) + l
return FieldSize
# ====================================================================
# Encoders!
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
local_chr = _PY2 and chr or (lambda x: bytes((x,))) ##PY25
##!PY25 local_chr = chr if bytes is str else lambda x: bytes((x,))
def EncodeVarint(write, value):
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeVarint
def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
local_chr = _PY2 and chr or (lambda x: bytes((x,))) ##PY25
##!PY25 local_chr = chr if bytes is str else lambda x: bytes((x,))
def EncodeSignedVarint(write, value):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeSignedVarint
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return "".encode("latin1").join(pieces) ##PY25
##!PY25 return b"".join(pieces)
def TagBytes(field_number, wire_type):
"""Encode the given tag and return the bytes. Only called at startup."""
return _VarintBytes(wire_format.PackTag(field_number, wire_type))
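# Illustrative check (added for clarity, not part of the original module):
# field number 1 with wire type 0 (varint) packs to the single tag byte 0x08,
# and 300 encodes as the two varint bytes 0xAC 0x02 (low 7 bits first, with a
# continuation bit set on every byte except the last).
def _example_tag_and_varint_bytes():
  assert struct.unpack('B', TagBytes(1, wire_format.WIRETYPE_VARINT)) == (0x08,)
  assert struct.unpack('BB', _VarintBytes(300)) == (0xAC, 0x02)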
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
"""Return a constructor for an encoder for fields of a particular type.
Args:
wire_type: The field's wire type, for encoding tags.
encode_value: A function which encodes an individual value, e.g.
_EncodeVarint().
compute_value_size: A function which computes the size of an individual
value, e.g. _VarintSize().
"""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(element)
local_EncodeVarint(write, size)
for element in value:
encode_value(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, value)
return EncodeField
return SpecificEncoder
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
"""Like SimpleEncoder but additionally invokes modify_value on every value
before passing it to encode_value. Usually modify_value is ZigZagEncode."""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(modify_value(element))
local_EncodeVarint(write, size)
for element in value:
encode_value(write, modify_value(element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, modify_value(element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, modify_value(value))
return EncodeField
return SpecificEncoder
def _StructPackEncoder(wire_type, format):
"""Return a constructor for an encoder for a fixed-width field.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
write(local_struct_pack(format, element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
write(local_struct_pack(format, element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return write(local_struct_pack(format, value))
return EncodeField
return SpecificEncoder
def _FloatingPointEncoder(wire_type, format):
"""Return a constructor for an encoder for float fields.
This is like StructPackEncoder, but catches errors that may be due to
passing non-finite floating-point values to struct.pack, and makes a
second attempt to encode those values.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
b = _PY2 and (lambda x:x) or (lambda x:x.encode('latin1')) ##PY25
value_size = struct.calcsize(format)
if value_size == 4:
def EncodeNonFiniteOrRaise(write, value):
# Remember that the serialized form uses little-endian byte order.
if value == _POS_INF:
write(b('\x00\x00\x80\x7F')) ##PY25
##!PY25 write(b'\x00\x00\x80\x7F')
elif value == _NEG_INF:
write(b('\x00\x00\x80\xFF')) ##PY25
##!PY25 write(b'\x00\x00\x80\xFF')
elif value != value: # NaN
write(b('\x00\x00\xC0\x7F')) ##PY25
##!PY25 write(b'\x00\x00\xC0\x7F')
else:
raise
elif value_size == 8:
def EncodeNonFiniteOrRaise(write, value):
if value == _POS_INF:
write(b('\x00\x00\x00\x00\x00\x00\xF0\x7F')) ##PY25
##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
elif value == _NEG_INF:
write(b('\x00\x00\x00\x00\x00\x00\xF0\xFF')) ##PY25
##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
elif value != value: # NaN
write(b('\x00\x00\x00\x00\x00\x00\xF8\x7F')) ##PY25
##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
else:
raise
else:
raise ValueError('Can\'t encode floating-point values that are '
'%d bytes long (only 4 or 8)' % value_size)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
# This try/except block is going to be faster than any code that
# we could write to check whether element is finite.
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
try:
write(local_struct_pack(format, value))
except SystemError:
EncodeNonFiniteOrRaise(write, value)
return EncodeField
return SpecificEncoder
# ====================================================================
# Here we declare an encoder constructor for each field type. These work
# very similarly to sizer constructors, described earlier.
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
wire_format.ZigZagEncode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
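# Illustrative check (added for clarity, not part of the original module):
# an encoder constructor is called with (field_number, is_repeated, is_packed);
# the resulting encoder takes a write callback plus a value and emits the tag
# followed by the payload -- the classic "field 1 = varint 150" example below
# produces the bytes 0x08 0x96 0x01.
def _example_encoder_usage():
  pieces = []
  encoder = UInt32Encoder(1, False, False)
  encoder(pieces.append, 150)
  expected = TagBytes(1, wire_format.WIRETYPE_VARINT) + _VarintBytes(150)
  assert "".encode("latin1").join(pieces) == expected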
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
##!PY25 false_byte = b'\x00'
##!PY25 true_byte = b'\x01'
false_byte = '\x00'.encode('latin1') ##PY25
true_byte = '\x01'.encode('latin1') ##PY25
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a string field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
encoded = element.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
write(encoded)
return EncodeRepeatedField
else:
def EncodeField(write, value):
encoded = value.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
return write(encoded)
return EncodeField
def BytesEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a bytes field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, local_len(element))
write(element)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, local_len(value))
return write(value)
return EncodeField
def GroupEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a group field."""
start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(start_tag)
element._InternalSerialize(write)
write(end_tag)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(start_tag)
value._InternalSerialize(write)
return write(end_tag)
return EncodeField
def MessageEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a message field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, element.ByteSize())
element._InternalSerialize(write)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, value.ByteSize())
return value._InternalSerialize(write)
return EncodeField
# --------------------------------------------------------------------
# As before, MessageSet is special.
def MessageSetItemEncoder(field_number):
"""Encoder for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
start_bytes = "".encode("latin1").join([ ##PY25
##!PY25 start_bytes = b"".join([
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
TagBytes(2, wire_format.WIRETYPE_VARINT),
_VarintBytes(field_number),
TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_EncodeVarint = _EncodeVarint
def EncodeField(write, value):
write(start_bytes)
local_EncodeVarint(write, value.ByteSize())
value._InternalSerialize(write)
return write(end_bytes)
return EncodeField
|
|
"""
Unit tests for sheet and sheetcoords.
$Id$
"""
__version__='$Revision$'
import unittest
from numpy import array
from topo.base.sheetcoords import SheetCoordinateSystem,Slice
from topo.base.sheet import *
from topo.base.boundingregion import BoundingBox
from topo.base.sheetview import SheetView
# CEBALERT:
# Changes that need to be made to this file:
# - don't create a new ct every time, just change its density
# - test array versions of coordinate transform functions
# - ensure methods of Slice are tested
class TestCoordinateTransforms(unittest.TestCase):
"""
    Tests for coordinate transformations on a SheetCoordinateSystem.
Subclassed for use; the subclasses have a setUp method to create
the particular coordinates to use each time.
"""
def makeBox(self):
self.box = BoundingBox(points=((self.left,self.bottom),
(self.right,self.top)))
self.ct = SheetCoordinateSystem(self.box,self.density,self.density)
# float bounds for matrix coordinates: these
# values are actually outside the matrix
self.rbound = self.density*(self.top-self.bottom)
self.cbound = self.density*(self.right-self.left)
#self.cbound = int(self.density*(self.right-self.left)) / float((self.right-self.left))
#self.rbound = int(self.density*(self.top-self.bottom)) / float((self.top-self.bottom))
# CEBALERT: this is supposed to be a small distance
D = 0.00001
# Sheet values around the edge of the BoundingBox
self.just_in_right_x = self.right - D
self.just_in_bottom_y = self.bottom + D
self.just_out_top_y = self.top + D
self.just_out_left_x = self.left - D
# Matrix values around the edge of the matrix
self.just_out_right_idx = self.rbound + D
self.just_out_bottom_idx = self.cbound + D
self.just_out_top_idx = 0.0 - D
self.just_out_left_idx = 0.0 - D
### sheet2matrix() tests
#
def test_sheet2matrix_center(self):
"""
Check that the center of the Sheet corresponds to the center
of the matrix.
"""
x_center = self.left+(self.right-self.left)/2.0
y_center = self.bottom+(self.top-self.bottom)/2.0
row, col = self.ct.sheet2matrix(x_center,y_center)
self.assertEqual((row,col),(self.rbound/2.0,self.cbound/2.0))
def test_sheet2matrix_left_top(self):
"""
Check that the top-left of the Sheet is [0,0] in matrix
coordinates.
"""
row, col = self.ct.sheet2matrix(self.left,self.top)
self.assertEqual((row,col),(0,0))
def test_sheet2matrix_right_bottom(self):
"""
Check that the bottom-right of the Sheet is [rbound,cbound] in matrix
coordinates.
"""
row, col = self.ct.sheet2matrix(self.right,self.bottom)
self.assertEqual((row,col),(self.rbound,self.cbound))
def test_sheet2matrix_matrix2sheet(self):
"""
Check that matrix2sheet() is the inverse of sheet2matrix().
"""
# top-right corner
row, col = self.ct.sheet2matrix(self.right,self.top)
x_right, y_top = self.ct.matrix2sheet(row,col)
self.assertEqual((x_right,y_top),(self.right,self.top))
# bottom-left corner
row, col = self.ct.sheet2matrix(self.left,self.bottom)
x_left, y_bottom = self.ct.matrix2sheet(row,col)
self.assertEqual((x_left,y_bottom),(self.left,self.bottom))
def test_matrix2sheet_sheet2matrix(self):
"""
Check that sheet2matrix() is the inverse of matrix2sheet().
"""
# top-right corner
x,y = self.ct.matrix2sheet(float(0),float(self.last_col))
top_row,right_col = self.ct.sheet2matrix(x,y)
self.assertEqual((top_row,right_col),(float(0),float(self.last_col)))
# bottom-left corner
x,y = self.ct.matrix2sheet(float(self.last_row),float(0))
bottom_row,left_col = self.ct.sheet2matrix(x,y)
self.assertEqual((bottom_row,left_col),(float(self.last_row),float(0)))
### sheet2matrixidx() tests
#
def test_sheet2matrixidx_left_top(self):
"""
Test a point just inside the top-left corner of the BoundingBox, and
one just outside.
"""
# inside
r,c = 0,0
x,y = self.left,self.top
self.assertEqual(self.ct.sheet2matrixidx(x,y), (r,c))
# outside
r,c = -1,-1
x,y = self.just_out_left_x,self.just_out_top_y
self.assertEqual(self.ct.sheet2matrixidx(x,y), (r,c))
def test_sheet2matrixidx_left_bottom(self):
"""
Test a point just inside the left-bottom corner of the BoundingBox, and
one just outside.
"""
# inside
r,c = self.last_row, 0
x,y = self.left, self.just_in_bottom_y
self.assertEqual(self.ct.sheet2matrixidx(x,y),(r,c))
# outside
r,c = self.last_row+1, -1
x,y = self.just_out_left_x, self.bottom
self.assertEqual(self.ct.sheet2matrixidx(x,y),(r,c))
def test_sheet2matrixidx_right_top(self):
"""
Test a point just inside the top-right corner of the BoundingBox, and
one just outside.
"""
# inside
r,c = 0,self.last_col
x,y = self.just_in_right_x,self.top
self.assertEqual(self.ct.sheet2matrixidx(x,y),(r,c))
# outside
r,c = -1,self.last_col+1
x,y = self.right,self.just_out_top_y
self.assertEqual(self.ct.sheet2matrixidx(x,y),(r,c))
def test_sheet2matrixidx_right_bottom(self):
"""
Test a point just inside the bottom-right corner of the BoundingBox,
and the corner itself - which should not be inside.
"""
# inside
r,c = self.last_row,self.last_col
x,y = self.just_in_right_x,self.just_in_bottom_y
self.assertEqual(self.ct.sheet2matrixidx(x,y),(r,c))
# not inside
r,c = self.last_row+1,self.last_col+1
x,y = self.right,self.bottom
self.assertEqual(self.ct.sheet2matrixidx(x,y),(r,c))
### matrix2sheet() tests
#
def test_matrix2sheet_left_top(self):
"""
Check that Sheet's (0,0) is the top-left of the matrix.
Check that just outside the top-left in matrix coordinates
comes back to Sheet coordinates that are outside the
BoundingBox.
"""
x,y = self.ct.matrix2sheet(0,0)
self.assertEqual((x,y), (self.left,self.top))
x,y = self.ct.matrix2sheet(self.just_out_left_idx,self.just_out_top_idx)
self.assertFalse(self.box.contains(x,y))
def test_matrix2sheet_right_bottom(self):
"""
Check that Sheet's (right,bottom) is the bottom-right in
matrix coordinates i.e. [rbound,cbound]
Check that just outside the bottom-right in matrix coordinates
comes back to Sheet coordinates that are outside the
BoundingBox.
"""
x,y = self.ct.matrix2sheet(self.rbound,self.cbound)
self.assertEqual((x,y), (self.right,self.bottom))
x,y = self.ct.matrix2sheet(self.just_out_right_idx,self.just_out_bottom_idx)
self.assertFalse(self.box.contains(x,y))
def test_matrix2sheet_center(self):
"""
Check that the center in Sheet coordinates corresponds to
the center in continuous matrix coordinates.
"""
x_center = self.left+(self.right-self.left)/2.0
y_center = self.bottom+(self.top-self.bottom)/2.0
center_float_row = self.rbound/2.0
center_float_col = self.cbound/2.0
x,y = self.ct.matrix2sheet(center_float_row,center_float_col)
self.assertEqual((x,y),(x_center,y_center))
### matrixidx2sheet() tests
#
def test_matrixidx2sheet_left_top(self):
"""
The top-left matrix cell [0,0] should be given back in Sheet
coordinates at the center of that cell.
The cell [-1,-1] outside this corner should come back out of
the BoundingBox
"""
# inside
r,c = 0,0
x,y = self.left+self.half_unit,self.top-self.half_unit
test_x, test_y = self.ct.matrixidx2sheet(r,c)
self.assertEqual((test_x,test_y), (x,y))
self.assertTrue(self.box.contains(test_x,test_y))
# outside
r,c = -1,-1
test_x, test_y = self.ct.matrixidx2sheet(r,c)
self.assertFalse(self.box.contains(test_x,test_y))
def test_matrixidx2sheet_left_bottom(self):
"""
        The bottom-left matrix cell [last_row,0] should be given back
in Sheet coordinates at the center of that cell.
The cell [last_row+1,-1] outside this corner should come back out of
the BoundingBox.
"""
# inside
r,c = self.last_row,0
x,y = self.left+self.half_unit,self.bottom+self.half_unit
self.assertEqual(self.ct.matrixidx2sheet(r,c), (x,y))
# outside
r,c = self.last_row+1,-1
test_x, test_y = self.ct.matrixidx2sheet(r,c)
self.assertFalse(self.box.contains(test_x,test_y))
def test_matrixidx2sheet_right_top(self):
"""
        The top-right matrix cell [0,last_col] should be given back
in Sheet coordinates at the center of that cell.
The cell [-1,last_col+1] outside this corner should come back out of
the BoundingBox.
"""
# inside
r,c = 0,self.last_col
x,y = self.right-self.half_unit,self.top-self.half_unit
self.assertEqual(self.ct.matrixidx2sheet(r,c), (x,y))
# outside
r,c = -1,self.last_col+1
test_x, test_y = self.ct.matrixidx2sheet(r,c)
self.assertFalse(self.box.contains(test_x,test_y))
def test_matrixidx2sheet_right_bottom(self):
"""
        The bottom-right matrix cell [last_row,last_col] should be given back
in Sheet coordinates at the center of that cell.
The cell [last_row+1,last_col+1] outside this corner should come back out of
the BoundingBox.
"""
r,c = self.last_row,self.last_col
x,y = self.right-self.half_unit,self.bottom+self.half_unit
self.assertEqual(self.ct.matrixidx2sheet(r,c), (x,y))
# outside
r,c = self.last_row+1,self.last_col+1
test_x, test_y = self.ct.matrixidx2sheet(r,c)
self.assertFalse(self.box.contains(test_x,test_y))
def test_matrixidx2sheet_center(self):
"""
The row and col *index* of the center unit in the matrix should come
back as the Sheet coordinates of the center of that center unit.
"""
r,c = self.center_unit_idx
x_center = self.left+(self.right-self.left)/2.0
y_center = self.bottom+(self.top-self.bottom)/2.0
x,y = x_center+self.half_unit, y_center-self.half_unit
self.assertEqual(self.ct.matrixidx2sheet(r,c), (x,y))
def test_matrixidx2sheet_sheet2matrixidx(self):
"""
        Check that sheet2matrixidx() is the inverse of matrixidx2sheet().
"""
# top-right corner
x,y = self.ct.matrixidx2sheet(float(0),float(self.last_col))
top_row,right_col = self.ct.sheet2matrixidx(x,y)
self.assertEqual((top_row,right_col),(float(0),float(self.last_col)))
# bottom-left corner
x,y = self.ct.matrixidx2sheet(float(self.last_row),float(0))
bottom_row,left_col = self.ct.sheet2matrixidx(x,y)
self.assertEqual((bottom_row,left_col),(float(self.last_row),float(0)))
class TestBox1Coordinates(TestCoordinateTransforms):
"""
Test coordinate transformations using the standard, origin-centered unit box
with density 10.
A 10x10 matrix.
"""
def setUp(self):
self.left = -0.5
self.bottom = -0.5
self.top = 0.5
self.right = 0.5
self.density = 10
self.half_unit = 0.05
# for the matrix representation - I think having this manual statement is
# safer than a calculation...
self.last_row = 9
self.last_col = 9
self.center_unit_idx = (5,5) # by the way sheet2matrixidx is defined
self.makeBox()
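# Illustrative note (added for clarity, not part of the original tests): with
# this unit box and density 10 the matrix is 10x10, so sheet2matrix(-0.5, 0.5)
# maps the top-left corner to (0, 0), sheet2matrix(0.5, -0.5) maps the
# bottom-right corner to (10, 10), and matrixidx2sheet(0, 0) returns
# (-0.45, 0.45), the center of the top-left unit -- exactly the relationships
# exercised by the tests above.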
class TestBox2Coordinates(TestCoordinateTransforms):
"""
Test coordinate transformations on the box defined by (1,1), (3,4),
with density 8.
A 24 x 16 matrix.
"""
def setUp(self):
self.left = 1
self.bottom = 1
self.right = 3
self.top = 4
self.density = 8
self.half_unit = 0.0625
# for the matrix representation - I think having this manual statement is
# safer than a calculation...
self.last_row = 23
self.last_col = 15
self.center_unit_idx = (12,8) # by the way sheet2matrixidx is defined
self.makeBox()
# CEB: unfinished and unused - still making tests for TestBox3Coordinates...
class TestBox3Coordinates(TestCoordinateTransforms):
def setUp(self):
self.left = -0.8
self.bottom = -0.8
self.top = 0.8
self.right = 0.8
self.density = 16
self.half_unit = 0.03125
# for the matrix representation - I think having this manual statement is
# safer than a calculation...
self.last_row = 24
self.last_col = 24
self.center_unit_idx = (12,12) # by the way sheet2matrixidx is defined
self.makeBox()
suite = unittest.TestSuite()
cases = [TestBox1Coordinates,TestBox2Coordinates]#,TestBox3Coordinates]
suite.addTests([unittest.makeSuite(case) for case in cases])
class ExtraSheetTests(unittest.TestCase):
"""
sheet tests that were written independently of the framework above.
Each of these tests runs once and is independent of the rest of the file.
"""
def test_slice2bounds_bounds2slice(self):
bb = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))
ct = SheetCoordinateSystem(bb,10)
slice_ =(0,3,7,8)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
test_slice = Slice(bounds,ct)
for a,b in zip(slice_,test_slice):
self.assertEqual(a,b)
slice_ =(4,7,8,10)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
test_slice = Slice(bounds,ct)
for a,b in zip(slice_,test_slice):
self.assertEqual(a,b)
slice_ =(2,3,4,8)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
test_slice = Slice(bounds,ct)
for a,b in zip(slice_,test_slice):
self.assertEqual(a,b)
slice_ =(0,3,9,10)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
test_slice = Slice(bounds,ct)
for a,b in zip(slice_,test_slice):
self.assertEqual(a,b)
bb = BoundingBox(points=((-0.75,-0.5),(0.75,0.5)))
ct = SheetCoordinateSystem(bb,20,20)
slice_ =(9,14,27,29)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
test_slice = Slice(bounds,ct)
for a,b in zip(slice_,test_slice):
self.assertEqual(a,b)
slice_ =(0,6,0,7)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
test_slice = Slice(bounds,ct)
for a,b in zip(slice_,test_slice):
self.assertEqual(a,b)
slice_ =(6,10,11,29)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
test_slice = Slice(bounds,ct)
for a,b in zip(slice_,test_slice):
self.assertEqual(a,b)
bb = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))
ct = SheetCoordinateSystem(bb,7)
slice_ =(4,7,2,3)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
test_slice = Slice(bounds,ct)
for a,b in zip(slice_,test_slice):
self.assertEqual(a,b)
slice_ =(0,7,0,7)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
test_slice = Slice(bounds,ct)
for a,b in zip(slice_,test_slice):
self.assertEqual(a,b)
def test_coordinate_position(self):
"""
these tests duplicate some of the earlier ones,
except these use a matrix with non-integer
(right-left) and (top-bottom). This is an important
test case for the definition of density; without it,
the tests above could be passed by a variety of
sheet2matrix, bounds2shape functions, etc.
CEBALERT: transfer the box to TestBox3Coordinates and have
these tests run in the framework.
"""
l,b,r,t = (-0.8,-0.8,0.8,0.8)
        # mimics how a sheet recalculates its density
density = int(16*(r-l)) / float(r-l)
bounds = BoundingBox(points=((l,b),(r,t)))
ct = SheetCoordinateSystem(bounds,density,density)
self.assertEqual(ct.sheet2matrixidx(0.8,0.8),(0,24+1))
self.assertEqual(ct.sheet2matrixidx(0.0,0.0),(12,12))
self.assertEqual(ct.sheet2matrixidx(-0.8,-0.8),(24+1,0))
self.assertEqual(ct.matrixidx2sheet(24,0),
(((r-l) / int(density*(r-l)) / 2.0) + l,
(t-b) / int(density*(t-b)) / 2.0 + b))
self.assertEqual(ct.matrixidx2sheet(0,0),
(((r-l) / int(density*(r-l)) / 2.0) + l ,
(t-b) / int(density*(t-b)) * (int(density*(t-b)) - 0.5) + b))
x,y = ct.matrixidx2sheet(0,0)
self.assertTrue(bounds.contains(x,y))
self.assertEqual((0,0),ct.sheet2matrixidx(x,y))
x,y = ct.matrixidx2sheet(25,25)
self.assertFalse(bounds.contains(x,y))
self.assertNotEqual((24,24),ct.sheet2matrixidx(x,y))
x,y = ct.matrixidx2sheet(0,24)
self.assertTrue(bounds.contains(x,y))
self.assertEqual((0,24),ct.sheet2matrixidx(x,y))
x,y = ct.matrixidx2sheet(24,0)
self.assertTrue(bounds.contains(x,y))
self.assertEqual((24,0),ct.sheet2matrixidx(x,y))
def test_Sheet_creation(self):
# Example where the nominal x and y densities would not be equal.
# density along x =10
# density along y <10
# The density along y should become 10, by adjusting the height to be 2.0
# in this case.
# The y center of the bounds should remain -0.0025. Hence we should get
# a bottom bound of -1.0025 and a top one of 0.9975.
sheet = Sheet(nominal_density=10,
nominal_bounds=BoundingBox(points=((-0.5,-1.005),(0.5,1.0))))
self.assertEqual(sheet.xdensity,10)
self.assertEqual(sheet.xdensity,sheet.ydensity)
l,b,r,t = sheet.lbrt
self.assertEqual(l,-0.5)
self.assertEqual(r,0.5)
self.assertAlmostEqual(t,0.9975)
self.assertAlmostEqual(b,-1.0025)
# CEBALERT: this test should probably be somewhere else and
# called something different
def test_connection_field_like(self):
# test a ConnectionField-like example
sheet = Sheet(nominal_density=10,nominal_bounds=BoundingBox(radius=0.5))
cf_bounds = BoundingBox(points=((0.3,0.3),(0.6,0.6)))
slice_ = Slice(cf_bounds,sheet)
slice_.crop_to_sheet(sheet)
# check it's been cropped to fit onto sheet...
self.assertEqual(slice_.tolist(),[0,2,8,10])
# now check that it gives the correct bounds...
cropped_bounds = slice_.compute_bounds(sheet)
true_cropped_bounds = BoundingBox(points=((0.3,0.3),(0.5,0.5)))
for a,b in zip(cropped_bounds.lbrt(),true_cropped_bounds.lbrt()):
self.assertAlmostEqual(a,b)
# and that bounds2shape() gets the correct size
# rows,cols = bounds2shape(cropped_bounds,sheet.density,sheet.ydensity)
# self.assertEqual((rows,cols),(2,2))
def test_bounds2slice(self):
# test that if you ask to slice the matrix with the sheet's BoundingBox, you
# get back the whole matrix
sheet_bb = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))
ct = SheetCoordinateSystem(sheet_bb,10)
slice_ = Slice(sheet_bb,ct)
true_slice = (0,10,0,10) # inclusive left boundary, exclusive right boundary
self.assertEqual(tuple(slice_.tolist()),true_slice)
# for the following tests, the values have been all computed by hand and then
# tested (by JC). The boundingbox and density tested have been chosen randomly,
# then drawn to get the slice from it.
# Test with 20 density.
ct = SheetCoordinateSystem(sheet_bb,20,20)
bb = BoundingBox(points=((-0.05,-0.20),(0.20,0.05)))
slice_ = Slice(bb,ct)
true_slice = (9,14,9,14)
self.assertEqual(tuple(slice_.tolist()),true_slice)
bb = BoundingBox(points=((-0.40,0),(-0.30,0.30)))
slice_ = Slice(bb,ct)
true_slice = (4,10,2,4)
self.assertEqual(tuple(slice_.tolist()),true_slice)
bb = BoundingBox(points=((0.15,0.10),(0.30,0.30)))
slice_ = Slice(bb,ct)
true_slice = (4,8,13,16)
self.assertEqual(tuple(slice_.tolist()),true_slice)
bb = BoundingBox(points=((-0.05,-0.45),(0.10,-0.25)))
slice_ = Slice(bb,ct)
true_slice = (15,19,9,12)
self.assertEqual(tuple(slice_.tolist()),true_slice)
# test with 7 density sheet.
bb = BoundingBox(points=((-0.5+2.0/7.0,0.5-2.0/7.0),(-0.5+4.0/7.0,0.5)))
ct = SheetCoordinateSystem(sheet_bb,7)
slice_ = Slice(bb,ct)
true_slice = (0,2,2,4)
self.assertEqual(tuple(slice_.tolist()),true_slice)
#(4x4 matrix)
ct = SheetCoordinateSystem(BoundingBox(radius=0.2),xdensity=10,ydensity=10)
test_bounds = BoundingBox(radius=0.1)
slice_=Slice(test_bounds,ct)
r1,r2,c1,c2 = slice_
self.assertEqual((r1,r2,c1,c2),(1,3,1,3))
# Note: this test fails because units that fall on the
# boundaries should all be included; bounds2slice does not
# include the ones on the left boundary because of floating point
# representation.
#test_bounds.translate(0.05,-0.05)
#r1,r2,c1,c2 = ct.bounds2slice(test_bounds)
#self.assertEqual((r1,r2,c1,c2),(1,4,1,4))
def test_slice2bounds(self):
# test that if you ask to slice the matrix with the sheet's BoundingBox, you
# get back the whole matrix
# (I chose to use a 7 density, I don't know why I like 7 so much, it is kind of mystical)
sheet_bb = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))
ct = SheetCoordinateSystem(sheet_bb,7)
slice_ = (0,7,0,7)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
true_bounds_lbrt = (-0.5,-0.5,0.5,0.5)
for a,b in zip(bounds.lbrt(),true_bounds_lbrt):
self.assertAlmostEqual(a,b)
# for the following tests, the values have been all computed
# by hand and then tested (by JC). The boundingbox and density
# tested have been chosen randomly, then drawn to get the slice
# from it.
# Test for 10 density
ct = SheetCoordinateSystem(sheet_bb,10)
slice_ = (0,9,1,5)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
true_bounds_lbrt = (-0.4,-0.4,0,0.5)
for a,b in zip(bounds.lbrt(),true_bounds_lbrt):
self.assertAlmostEqual(a,b)
slice_ = (2,3,7,10)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
true_bounds_lbrt = (0.2,0.2,0.5,0.3)
for a,b in zip(bounds.lbrt(),true_bounds_lbrt):
self.assertAlmostEqual(a,b)
# Test for 7 density
ct = SheetCoordinateSystem(sheet_bb,7)
slice_ = (3,7,2,5)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
true_bounds_lbrt = (-0.5+2.0/7.0,-0.5,-0.5+5.0/7.0,0.5-3.0/7.0)
for a,b in zip(bounds.lbrt(),true_bounds_lbrt):
self.assertAlmostEqual(a,b)
slice_ = (2,6,0,1)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
true_bounds_lbrt = (-0.5,0.5-6.0/7.0,-0.5+1.0/7.0,0.5-2.0/7.0)
for a,b in zip(bounds.lbrt(),true_bounds_lbrt):
self.assertAlmostEqual(a,b)
# Test for 25 density
ct = SheetCoordinateSystem(sheet_bb,25)
slice_ = (0,25,4,10)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
true_bounds_lbrt = (-0.5+4.0/25.0,-0.5,-0.5+10.0/25.0,0.5)
for a,b in zip(bounds.lbrt(),true_bounds_lbrt):
self.assertAlmostEqual(a,b)
slice_ = (7,18,3,11)
bounds = BoundingBox(points=Slice._slicespec2boundsspec(slice_,ct))
true_bounds_lbrt = (-0.5+3.0/25.0,0.5-18.0/25.0,-0.5+11.0/25.0,0.5-7.0/25.0)
for a,b in zip(bounds.lbrt(),true_bounds_lbrt):
self.assertAlmostEqual(a,b)
## # bounds2shape() tests
## #
## def test_bounds2shape(self):
## """
## Check that the shape of the matrix based on the BoundingBox and
## density is correct.
## """
## n_rows,n_cols = bounds2shape(self.box,self.density)
## self.assertEqual((n_rows,n_cols),(self.last_row+1,self.last_col+1))
def test_sheetview_release(self):
s = Sheet()
s.activity = array([[1,2],[3,4]])
# Call s.sheet_view(..) with a parameter
sv2 = SheetView((s.activity,s.bounds),
src_name=s.name)
self.assertEqual(len(s.sheet_views.keys()),0)
s.sheet_views['Activity']=sv2
self.assertEqual(len(s.sheet_views.keys()),1)
s.release_sheet_view('Activity')
self.assertEqual(len(s.sheet_views.keys()),0)
suite.addTest(unittest.makeSuite(ExtraSheetTests))
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
import copy
import json
import uuid
import time
import urlparse
import mox
import requests
import testtools
from keystoneclient.v3 import client
def parameterize(ref):
"""Rewrites attributes to match the kwarg naming convention in client.
    >>> parameterize({'project_id': 0})
{'project': 0}
"""
params = ref.copy()
for key in ref:
if key[-3:] == '_id':
params.setdefault(key[:-3], params.pop(key))
return params
class TestClient(client.Client):
def serialize(self, entity):
return json.dumps(entity, sort_keys=True)
class TestCase(testtools.TestCase):
TEST_TENANT_NAME = 'aTenant'
TEST_TOKEN = 'aToken'
TEST_USER = 'test'
TEST_ROOT_URL = 'http://127.0.0.1:5000/'
TEST_URL = '%s%s' % (TEST_ROOT_URL, 'v3')
TEST_ROOT_ADMIN_URL = 'http://127.0.0.1:35357/'
TEST_ADMIN_URL = '%s%s' % (TEST_ROOT_ADMIN_URL, 'v3')
TEST_REQUEST_BASE = {
'verify': True,
}
def setUp(self):
super(TestCase, self).setUp()
self.mox = mox.Mox()
self._original_time = time.time
time.time = lambda: 1234
requests.request = self.mox.CreateMockAnything()
self.client = TestClient(username=self.TEST_USER,
token=self.TEST_TOKEN,
tenant_name=self.TEST_TENANT_NAME,
auth_url=self.TEST_URL,
endpoint=self.TEST_URL)
def tearDown(self):
time.time = self._original_time
self.mox.UnsetStubs()
self.mox.VerifyAll()
super(TestCase, self).tearDown()
class UnauthenticatedTestCase(testtools.TestCase):
""" Class used as base for unauthenticated calls """
TEST_ROOT_URL = 'http://127.0.0.1:5000/'
TEST_URL = '%s%s' % (TEST_ROOT_URL, 'v3')
TEST_ROOT_ADMIN_URL = 'http://127.0.0.1:35357/'
TEST_ADMIN_URL = '%s%s' % (TEST_ROOT_ADMIN_URL, 'v3')
TEST_REQUEST_BASE = {
'verify': True,
}
def setUp(self):
super(UnauthenticatedTestCase, self).setUp()
self.mox = mox.Mox()
self._original_time = time.time
time.time = lambda: 1234
requests.request = self.mox.CreateMockAnything()
def tearDown(self):
time.time = self._original_time
self.mox.UnsetStubs()
self.mox.VerifyAll()
super(UnauthenticatedTestCase, self).tearDown()
class CrudTests(testtools.TestCase):
key = None
collection_key = None
model = None
manager = None
def new_ref(self, **kwargs):
kwargs.setdefault('id', uuid.uuid4().hex)
return kwargs
def additionalSetUp(self):
self.headers = {
'GET': {
'X-Auth-Token': 'aToken',
'User-Agent': 'python-keystoneclient',
}
}
self.headers['HEAD'] = self.headers['GET'].copy()
self.headers['DELETE'] = self.headers['GET'].copy()
self.headers['PUT'] = self.headers['GET'].copy()
self.headers['POST'] = self.headers['GET'].copy()
self.headers['POST']['Content-Type'] = 'application/json'
self.headers['PATCH'] = self.headers['POST'].copy()
def serialize(self, entity):
if isinstance(entity, dict):
return json.dumps({self.key: entity}, sort_keys=True)
if isinstance(entity, list):
return json.dumps({self.collection_key: entity}, sort_keys=True)
raise NotImplementedError('Are you sure you want to serialize that?')
def test_create(self, ref=None):
ref = ref or self.new_ref()
resp = TestResponse({
"status_code": 201,
"text": self.serialize(ref),
})
method = 'POST'
req_ref = ref.copy()
req_ref.pop('id')
kwargs = copy.copy(self.TEST_REQUEST_BASE)
kwargs['headers'] = self.headers[method]
kwargs['data'] = self.serialize(req_ref)
requests.request(
method,
urlparse.urljoin(
self.TEST_URL,
'v3/%s' % self.collection_key),
**kwargs).AndReturn((resp))
self.mox.ReplayAll()
returned = self.manager.create(**parameterize(req_ref))
self.assertTrue(isinstance(returned, self.model))
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
def test_get(self, ref=None):
ref = ref or self.new_ref()
resp = TestResponse({
"status_code": 200,
"text": self.serialize(ref),
})
method = 'GET'
kwargs = copy.copy(self.TEST_REQUEST_BASE)
kwargs['headers'] = self.headers[method]
requests.request(
method,
urlparse.urljoin(
self.TEST_URL,
'v3/%s/%s' % (self.collection_key, ref['id'])),
**kwargs).AndReturn((resp))
self.mox.ReplayAll()
returned = self.manager.get(ref['id'])
self.assertTrue(isinstance(returned, self.model))
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
def test_list(self, ref_list=None, expected_path=None, **filter_kwargs):
ref_list = ref_list or [self.new_ref(), self.new_ref()]
resp = TestResponse({
"status_code": 200,
"text": self.serialize(ref_list),
})
method = 'GET'
kwargs = copy.copy(self.TEST_REQUEST_BASE)
kwargs['headers'] = self.headers[method]
requests.request(
method,
urlparse.urljoin(
self.TEST_URL,
expected_path or 'v3/%s' % self.collection_key),
**kwargs).AndReturn((resp))
self.mox.ReplayAll()
returned_list = self.manager.list(**filter_kwargs)
self.assertTrue(len(returned_list))
        for r in returned_list:
            self.assertTrue(isinstance(r, self.model))
def test_update(self, ref=None):
ref = ref or self.new_ref()
req_ref = ref.copy()
del req_ref['id']
resp = TestResponse({
"status_code": 200,
"text": self.serialize(ref),
})
method = 'PATCH'
kwargs = copy.copy(self.TEST_REQUEST_BASE)
kwargs['headers'] = self.headers[method]
kwargs['data'] = self.serialize(req_ref)
requests.request(
method,
urlparse.urljoin(
self.TEST_URL,
'v3/%s/%s' % (self.collection_key, ref['id'])),
**kwargs).AndReturn((resp))
self.mox.ReplayAll()
returned = self.manager.update(ref['id'], **parameterize(req_ref))
self.assertTrue(isinstance(returned, self.model))
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
def test_delete(self, ref=None):
ref = ref or self.new_ref()
resp = TestResponse({
"status_code": 204,
"text": '',
})
method = 'DELETE'
kwargs = copy.copy(self.TEST_REQUEST_BASE)
kwargs['headers'] = self.headers[method]
requests.request(
method,
urlparse.urljoin(
self.TEST_URL,
'v3/%s/%s' % (self.collection_key, ref['id'])),
**kwargs).AndReturn((resp))
self.mox.ReplayAll()
self.manager.delete(ref['id'])
class TestResponse(requests.Response):
""" Class used to wrap requests.Response and provide some
convenience to initialize with a dict """
def __init__(self, data):
self._text = None
        super(TestResponse, self).__init__()
if isinstance(data, dict):
self.status_code = data.get('status_code', None)
self.headers = data.get('headers', None)
# Fake the text attribute to streamline Response creation
self._text = data.get('text', None)
else:
self.status_code = data
def __eq__(self, other):
return self.__dict__ == other.__dict__
@property
def text(self):
return self._text
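# Illustrative sketch (added for clarity, not part of the original module):
# how a test typically stubs out an HTTP response with TestResponse.
def _example_test_response():
    resp = TestResponse({"status_code": 200, "text": '{"ok": true}'})
    assert resp.status_code == 200
    assert resp.text == '{"ok": true}'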
|
|
#! /usr/bin/env python
import datetime
import logging
import logging.handlers
import threading
import time
import traceback
import os
import pwd
import sys
from pprint import pprint
from autopyfactory.apfexceptions import FactoryConfigurationFailure, CondorStatusFailure, PandaStatusFailure
from autopyfactory.logserver import LogServer
major, minor, release, st, num = sys.version_info
"""
General info scheme:
BatchStatusPlugin
getInfo -> BatchStatusInfo[apfqname] -> BatchQueueInfo(QueueInfo)
.state1 -> 0
.state2 -> 123
getJobInfo -> BatchStatusInfo[apfqname] ->
WMSStatusPlugin
getInfo -> WMSStatusInfo[wmsqname] -> JobsInfo(QueueInfo)
.state1 -> 0
.state2 -> 123
getSiteInfo -> WMSStatusInfo[sitename] -> SiteInfo(QueueInfo)
getCloudInfo -> WMSStatusInfo[cloudname] -> CloudInfo(QueueInfo)
Inheritance:
BaseStatusInfo BaseInfo
| |
V V
BatchStatusInfo BatchQueueInfo
WMSStatusInfo WMSQueueInfo
"""
class BaseStatusInfo(dict):
"""
Base for top-level Info classes with second-level Info objects indexed
by APF/WMS queue names.
"""
def __init__(self):
dict.__init__(self)
def __getitem__(self, k):
"""
        If info for an unknown queue is requested, return a new (empty)
        instance of the default Info class rather than trigger a KeyError
        exception.
"""
if k in self.keys():
return dict.__getitem__(self, k)
else:
default_cls = self.default
# Make a new instance of whatever class is the default
return default_cls()
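# Illustrative sketch (added for clarity, not part of the original module):
# looking up a queue that has not been filled in yet does not raise; it hands
# back a fresh, empty instance of the subclass's default Info class, e.g.
#
#     info = BatchStatusInfo()            # sets self.default in __init__
#     empty = info['some_unknown_queue']  # new default instance, no KeyError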
class BaseInfo(object):
"""
Base for aggregate (attribute-oriented) Info classes which are used per APF/WMS queue.
Public Interface:
fill(dictionary, mappings=None, reset=True)
"""
def fill(self, dictionary, mappings=None, reset=True):
"""
method to fill object attributes with values from a dictionary.
Each key of the dictionary is supposed
to be one attribute in the object.
For example, if object is instance of class
class C():
def __init__(self):
self.x = ...
self.y = ...
then, the dictionary should look like
d = {'x': ..., 'y':...}
In case the dictionary keys and object attributes
do not match, a dictionary mapping can be passed.
For example, the object is instance of class
class C():
def __init__(self):
self.x = ...
self.y = ...
and the dictionary look like
d = {'a': ..., 'b':...}
then, the mapping must be like
mapping = {'a':'x', 'b':'y'}
If reset is True, new values override whatever the attributes had.
If reset is False, the new values are added to the previous value.
"""
usedk = []
for k,v in dictionary.iteritems():
try:
if mappings:
                    if k in mappings:
k = mappings[k]
else:
# a key in the dictionary is not in the mapping
# we ignore that case
log = logging.getLogger('autopyfactory')
                        log.warning('ignoring unknown key %s in the dictionary' % k)
continue
except KeyError as e:
log = logging.getLogger('autopyfactory')
log.error("fill(): Exception: %s" % str(e))
log.error("Stack Trace: %s " % traceback.format_exc())
log.error("k: %s v: %s dictionary: %s mappings: %s" % (k,v, dictionary, mappings))
            # If this key has not been seen yet in this call:
            #   - with reset=False, the new value is added to any stored value
            #   - with reset=True, the stored value is simply overwritten
            # If the key was already seen in this call, the new value is
            # accumulated onto the value stored so far.
if k not in usedk:
usedk.append(k)
if not reset:
try:
v = self.__dict__[k] + v
except KeyError:
pass
self.__dict__[k] = v
else:
try:
v = self.__dict__[k] + v
except KeyError:
                    # attribute not set yet; just store the new value below
pass
self.__dict__[k] = v
def __getattr__(self, name):
"""
Return 0 for non-existent attributes, otherwise behave normally.
"""
try:
return int(self.__getattribute__(name))
except AttributeError:
return 0
class BatchStatusInfo(BaseStatusInfo):
"""
Information returned by BatchStatusPlugin getInfo() calls.
Contains objects indexed by APF/WMS queue name.
"""
def __init__(self):
self.log = logging.getLogger('autopyfactory')
self.default = QueueInfo
def __str__(self):
s = "BatchStatusInfo: %d queues." % len(self)
return s
class WMSStatusInfo(BaseStatusInfo):
"""
Information returned by WMSStatusPlugin getInfo() calls.
Contains objects indexed by APF/WMS queue name.
"""
def __init__(self):
self.log = logging.getLogger('autopyfactory')
self.default = WMSQueueInfo
def __str__(self):
s = "WMSStatusInfo: %d queues." % len(self)
return s
class CloudStatusInfo(BaseStatusInfo):
"""
Information returned by WMSStatusPlugin getCloudInfo() calls.
Contains objects indexed by APF/WMS queue name.
"""
    def __init__(self):
        self.log = logging.getLogger('autopyfactory')
        self.default = CloudInfo
class CloudInfo(BaseInfo):
"""
Attribute-based class containing WMS info about (WMS) clouds.
"""
def __init__(self):
self.log = logging.getLogger('autopyfactory')
class WMSQueueInfo(BaseInfo):
"""
-----------------------------------------------------------------------
Empty anonymous placeholder for attribute-based WMS job information.
One per WMS queue (for example, one per siteid in PanDA)
Attributes are:
- notready
- ready
- running
- done
- failed
- unknown
Note: eventually, a new class (or this one modified) will have
a valid list of attributes for statuses with labels (PanDA ProdSourceLabel)
-----------------------------------------------------------------------
"""
def __init__(self):
self.log = logging.getLogger('autopyfactory')
def __getattr__(self, name):
"""
Return 0 for non-existent attributes, otherwise behave normally.
"""
try:
return self.__getattribute__(name)
except AttributeError:
return 0
def __str__(self):
s = "WMSQueueInfo: notready=%s, ready=%s, running=%s, done=%s, failed=%s, unknown=%s" %\
(self.notready,
self.ready,
self.running,
self.done,
self.failed,
self.unknown
)
return s
class JobInfo(object):
"""
Abstract representation of job in APF.
At a minimum we need
jobid Typically Condor cluster.proc ID, but could be VM instanceid
state APF job state: submitted, pending, running, done, failed, held
inittime datetime.datetime object
"""
def __init__(self, jobid, state, inittime):
self.jobid = jobid
self.state = state
self.inittime = inittime
def __str__(self):
s = "JobInfo: jobid=%s state=%s" % (self.jobid, self.state)
return s
class SiteStatusInfo(BaseStatusInfo):
"""
Information returned by WMSStatusPlugin getSiteInfo() calls.
Contains objects indexed by APF/WMS queue name.
"""
    def __init__(self):
        self.log = logging.getLogger('autopyfactory')
        self.default = SiteInfo
class SiteInfo(BaseInfo):
"""
Placeholder for attribute-based site information.
One per site.
"""
def __init__(self):
self.log = logging.getLogger('autopyfactory')
class QueueInfo(BaseInfo):
"""
Empty anonymous placeholder for aggregated queue information for a single APF queue.
Returns 0 as value for any un-initialized attribute.
"""
def __init__(self):
self.log = logging.getLogger('autopyfactory')
def __str__(self):
s = "QueueInfo: pending=%d, running=%d, suspended=%d" % (self.pending,
self.running,
self.suspended)
return s
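if __name__ == "__main__":
    # A minimal, self-contained sketch of the fill() semantics documented in
    # BaseInfo above (dictionary keys and values are illustrative only).
    q = WMSQueueInfo()
    q.fill({'activated': 3, 'running': 5},
           mappings={'activated': 'ready', 'running': 'running'})
    print(q)            # ready=3, running=5, all other counters 0
    q.fill({'running': 2}, reset=False)
    print(q.running)    # 7: with reset=False new values are accumulated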
|
|
#!/usr/bin/env python3
import argparse
import configparser
import ipaddress
import logging
import logging.handlers
import sys
from collections import namedtuple
import requests
CONFIG_FILE = 'godaddy-dyndns.conf'
LOG_FILE = 'godaddy-dyndns.log'
PREVIOUS_IP_FILE = 'previous-ip.txt'
class GdClient:
BASE_URI = 'https://api.godaddy.com/v1'
def __init__(self, key, secret):
self.key = key
self.secret = secret
def _auth_header(self):
return {'Authorization': 'sso-key {}:{}'.format(self.key,
self.secret)}
def _get(self, path):
r = requests.get(self.BASE_URI + path,
headers=self._auth_header())
r.raise_for_status()
return r
def _put(self, path, data):
r = requests.request('PUT',
self.BASE_URI + path,
json=data,
headers=self._auth_header())
r.raise_for_status()
return r
def get_domains(self):
return {d['domain']: None for d in self._get('/domains').json()}
def get_A_records(self, domain):
path = '/domains/{}/records/A'.format(domain)
return self._get(path).json()
def replace_A_records(self, domain, records):
path = '/domains/{}/records/A'.format(domain)
self._put(path, records)
def replace_A_records_by_name(self, domain, record):
path = '/domains/{}/records/A/{}'.format(domain, record['name'])
self._put(path, [record])
class Conf:
def __init__(self, filename):
parser = configparser.ConfigParser()
        parser.read(filename)
self.key = parser.get('godaddy', 'key')
self.secret = parser.get('godaddy', 'secret')
self.domains = self.__get_domains(parser)
def __get_domains(self, parser):
ds = {}
for section in parser.sections():
if section == 'godaddy':
continue
ds[section] = parser.get(section, 'subdomains', fallback=None)
if ds[section] is not None:
ds[section] = set(map(str.strip, ds[section].split(',')))
if ds == {}:
return None
else:
return ds
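# A hedged example of what godaddy-dyndns.conf is expected to look like, given
# the parsing above (domain names and credentials are placeholders):
#
#   [godaddy]
#   key = YOUR_API_KEY
#   secret = YOUR_API_SECRET
#
#   [example.com]
#   subdomains = www, home
#
#   [example.org]
#   ; no "subdomains" option: every A record of this domain gets updated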
def raise_if_invalid_ip(ip):
ipaddress.ip_address(ip)
def get_public_ip():
r = requests.get('https://api.ipify.org')
r.raise_for_status()
ip = r.text
raise_if_invalid_ip(ip)
return ip
def get_previous_public_ip():
try:
with open(PREVIOUS_IP_FILE, 'r') as f:
ip = f.read()
except FileNotFoundError:
return None
# Sanity check
raise_if_invalid_ip(ip)
return ip
def store_ip_as_previous_public_ip(ip):
with open(PREVIOUS_IP_FILE, 'w') as f:
f.write(ip)
def get_public_ip_if_changed(debug):
current_public_ip = get_public_ip()
if debug:
return current_public_ip
previous_public_ip = get_previous_public_ip()
if current_public_ip != previous_public_ip:
return current_public_ip
else:
return None
def init_logging(debug):
l = logging.getLogger()
l.setLevel(logging.INFO)
if debug:
l.addHandler(logging.StreamHandler())
else:
rotater = logging.handlers.RotatingFileHandler(
LOG_FILE, maxBytes=10000000, backupCount=2)
l.addHandler(rotater)
rotater.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
def span(predicate, iterable):
ts = []
fs = []
for x in iterable:
if predicate(x):
ts.append(x)
else:
fs.append(x)
return ts, fs
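# e.g. span(lambda n: n % 2 == 0, [1, 2, 3, 4]) == ([2, 4], [1, 3]): the first
# list holds the items matching the predicate, the second one the rest.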
def all_unique(iterable):
seen = set()
for x in iterable:
if x in seen:
return False
seen.add(x)
return True
def main(args):
if args.config is not None:
global CONFIG_FILE
global LOG_FILE
global PREVIOUS_IP_FILE
CONFIG_FILE = args.config + '/' + CONFIG_FILE
LOG_FILE = args.config + '/' + LOG_FILE
PREVIOUS_IP_FILE = args.config + '/' + PREVIOUS_IP_FILE
init_logging(args.debug)
ip = get_public_ip_if_changed(args.debug)
# If the IP hasn't changed then there's nothing to do.
if ip is None:
return 0
conf = Conf(CONFIG_FILE)
client = GdClient(conf.key, conf.secret)
logging.info("New IP %s", ip)
domains = client.get_domains() if conf.domains is None else conf.domains
for d, sds in domains.items():
logging.info("Checking %s", d)
records = client.get_A_records(d)
if sds is None:
relevant_records = records
else:
relevant_records = list(filter(lambda r: r['name'] in sds, records))
            non_existing = sds - set(map(lambda r: r['name'], relevant_records))
            if non_existing != set():
                logging.warning('Subdomains %s do not exist', ', '.join(non_existing))
if not all_unique(map(lambda r: r['name'], relevant_records)):
logging.error('Aborting: All records must have unique names. '
'Cannot update without losing information (e.g. TTL)'
'. Make sure all records have unique names before '
                          're-running the script.')
return 1
up_to_date, outdated = span(lambda r: ip == r['data'], relevant_records)
if up_to_date != []:
logging.info("Records %s already up to date",
", ".join(map(lambda r: r['name'], up_to_date)))
if outdated != []:
if sds is None:
# This replaces all records so we need to include
# non-relevant and non-outdated also
logging.info("Updating records %s",
", ".join(map(lambda r: ("{} ({})"
.format(r['name'],
r['data'])),
outdated)))
for r in outdated:
r['data'] = ip
client.replace_A_records(d, records)
else:
# In case we do not update all A records we cannot
# assume that we are the only writer for this
# domain. So we cannot safely overwrite everything (as
# that might overwrite what other writers have
# written) in one request.
for r in outdated:
logging.info("Updating record %s (%s)", r['name'], r['data'])
r['data'] = ip
client.replace_A_records_by_name(d, r)
store_ip_as_previous_public_ip(ip)
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--config', type=str)
args = parser.parse_args()
try:
sys.exit(main(args))
except Exception as e:
logging.exception(e)
logging.shutdown()
sys.exit(1)
|
|
import numpy as np
import theano
import theano.tensor as T
from . import model
""" optimizer.py (module)
The optimizer module contains the Optimizer class that is used
to configure low-rank MNE models, initialize the solver, and
minimize the (possibly constrained and regularized) objective
function.
"""
class Optimizer(object):
""" Optimizer (class)
The Optimizer class is the interface used for constructing
and optimizing low-rank MNE models. This class is built
flexibly to allow for easy customization.
"""
def __init__(self, resp, feat, rank, cetype=None, citype=None, rtype=None, solver=None, datasets=None, **kwargs):
"""Initialize Optimizer class instantiation.
[inputs] (resp, feat, rank, cetype=None, citype=None,
rtype=None, solver=None, datasets=None, **kwargs)
resp: numpy array of the output labels with shape
(nsamp,) where nsamp is the number of data
samples. Each element of resp must be in the range
[0, 1].
feat: numpy array of the input features with shape
(nsamp, ndim) where ndim is the number of features.
rank: positive integer that sets the number of columns
              of the matrices U and V (that both have shape (ndim,
rank)).
            cetype: (optional) list of strings that tell the class
              which equality constraints, if any, are being
              used. Can set to None if no equality constraints are
              used. Available equality constraints:
              - "UV-linear-insert": sets each U[:,k] =
                csigns[k]*V[:,k] for all k in range(rank) and
                directly imposes this constraint by substitution.
                Note that csigns is a numpy array of binary
                integers in {-1, 1} that sets the sign
                relationship between each component of U and V.
                csigns may be set using **kwargs.
              - "UV-linear": the same constraints as
                "UV-linear-insert", but instead of direct
                substitution the constraints are imposed using the
                method of Lagrange multipliers. csigns must also
                be set through **kwargs.
              - "UV-quadratic": the equality constraints defined
                by the upper triangle of
                np.dot(U, U.T) == np.dot(V, V.T).
              - "UV-bilinear": the equality constraints defined by
                the upper triangle (with diagonal excluded) of
                np.dot(U, V.T) == np.dot(V, U.T).
            citype: (optional) list of strings that tell the class
              which inequality constraints, if any, are being
              used. Can set to None if no inequality constraints
              are used. No inequality constraints are defined at
              this time.
rtype: (optional) list of strings that tell the class
which regularization penalties, if any, should be
added to the objective function. Can set to None if
no penalty functions are applied. Available penalty
functions:
- "nuclear-norm": the nuclear-norm regularizes over
the Frobenius-norms of U and V and promotes
sparsity of the eigenvalue spectrum of J =
np.dot(U, V.T).
solver: (optional) must be set and initialized (using
class function init_solver) before beginning the
optimization. It is optional to set here, however.
datasets: (optional) is a dict with keys "trainset",
"cvset", "testset" with values corresponding to
Boolean indices for those samples that belong to
each the training set, cross-validation set, and
test set, respectively. If datasets is set to None,
it is assumed that all samples belong to the
training set and the other subsets are
empty. Missing fields are assumed to be empty as
well.
"""
# initialize class members to standard arguments
self.rank = rank
self.cetype = cetype
self.citype = citype
self.rtype = rtype
self.solver = solver
self.datasets = datasets
# get data sizes
self.nsamp, self.ndim = self.get_data_sizes(feat)
self.ntrain, self.ncv, self.ntest = self.get_data_subset_sample_sizes(self.nsamp, self.datasets)
# initialize class members to keyword arguments
self.fscale = self.get_model_scaling(kwargs.get("fscale", None))
self.float_dtype = kwargs.get("float_dtype", np.float64)
self.precompile = kwargs.get("precompile", True)
# declare theano variables
self.x_dev = T.vector("x_dev")
self.lda_dev = T.vector("lambda_dev")
# set-up the model(s)
self.train_model, self.cv_model, self.test_model = self.config_models(resp, feat, **kwargs)
# build expressions for model(s)
self.build_expressions(self.train_model, grad=kwargs.get("compute_grad", True),
hess=kwargs.get("compute_hess", False), **kwargs)
self.build_expressions(self.cv_model, grad=False, hess=False, **kwargs)
self.build_expressions(self.test_model, grad=False, hess=False, **kwargs)
# compile the expressions
if kwargs.get("precompile", True):
self.compile_expressions(self.train_model, grad=kwargs.get("compute_grad", True),
hess=kwargs.get("compute_hess", False), **kwargs)
self.compile_expressions(self.cv_model, grad=False, hess=False, **kwargs)
self.compile_expressions(self.test_model, grad=False, hess=False, **kwargs)
        # initialize solver
if solver is not None:
self.init_solver(**kwargs)
self.initialized = True
def get_data_sizes(self, feat):
""" Get the number of samples and features.
[inputs] (feat)
feat: numpy array with shape (nsamp, ndim).
[returns] (nsamp, ndim)
nsamp: integer count of the number of samples in the
data set.
ndim: integer count of the number of features in the
data set.
"""
return feat.shape
def get_data_subset_sample_sizes(self, nsamp, datasets):
""" Get the number of samples in each of the data subsets.
[inputs] (nsamp, datasets)
nsamp: integer count of the number of samples in the
data set.
datasets: see definition from class function __init__.
[returns] (ntrain, ncv, ntest)
ntrain: integer count of the number of samples in the
training set.
ncv: integer count of the number of samples in the
cross-validation set.
ntest: integer count of the number of samples in the
test set.
"""
        ntrain, ncv, ntest = 0, 0, 0
        if datasets is None:
            # no subsets given: all samples belong to the training set
            ntrain = nsamp
        else:
            if "trainset" in datasets:
                ntrain = np.sum(datasets["trainset"])
            if "cvset" in datasets:
                ncv = np.sum(datasets["cvset"])
            if "testset" in datasets:
                ntest = np.sum(datasets["testset"])
        return ntrain, ncv, ntest
def get_model_scaling(self, fscale, **kwargs):
""" Determine the scaling of the negative log-likelihood objective
function (from mner.model.py).
[inputs] (fscale, **kwargs)
fscale: dict with keys "trainset", "cvset", and
"testset" with values that give the rescaling of the
objective function for the training set,
cross-validation set, and test set, respectively. If
a value is set to <=0 then the objective function is
scaled by the number of samples in each data
subset. If a value is set to None, then the
objective function is unscaled.
[returns] fscale
fscale: see inputs.
"""
        if fscale is None:
            fscale = dict()
            if self.datasets is None:
                fscale["trainset"] = 1.0
            else:
                if "trainset" in self.datasets:
                    fscale["trainset"] = 1.0
                if "cvset" in self.datasets:
                    fscale["cvset"] = 1.0
                if "testset" in self.datasets:
                    fscale["testset"] = 1.0
else:
if not isinstance(fscale, dict):
                if isinstance(fscale, (list, tuple)):
                    tmp = list(fscale)
                    fscale = dict()
                    idx = 0
                    if "trainset" in self.datasets:
                        fscale["trainset"] = tmp[idx]
                        idx += 1
                    if "cvset" in self.datasets:
                        fscale["cvset"] = tmp[idx]
                        idx += 1
                    if "testset" in self.datasets:
                        fscale["testset"] = tmp[idx]
                        idx += 1
else:
fscale = {"trainset": fscale, "cvset": fscale, "testset": fscale}
            # if the scaling is set to a non-positive number, scale by 1.0/samples;
            # if a value is missing or None, leave that objective unscaled (1.0)
            if fscale.get("trainset") is None:
                fscale["trainset"] = 1.0
            elif fscale["trainset"] <= 0.0:
                fscale["trainset"] = 1.0/self.ntrain
            if fscale.get("cvset") is None:
                fscale["cvset"] = 1.0
            elif fscale["cvset"] <= 0.0:
                fscale["cvset"] = 1.0/self.ncv
            if fscale.get("testset") is None:
                fscale["testset"] = 1.0
            elif fscale["testset"] <= 0.0:
                fscale["testset"] = 1.0/self.ntest
return fscale
def config_models(self, resp, feat, fscale=None, **kwargs):
""" Configure the low-rank MNE model(s) by instantiating the class
MNEr from mner.model.py.
[inputs] (resp, feat, fscale=None, **kwargs)
resp: see the class function __init__
feat: see the class function __init__
fscale: (optional) see the class function
get_model_scaling
[returns] (train_model, cv_model, test_model)
train_model: training set instantiation of class MNEr
with any regularization and constraints imposed.
cv_model: cross-validation set instantiation of class
MNEr (unregularized and no constraints)
test_model: test set instantiation of class MNEr
(unregularized and no constraints)
"""
self.use_vars = kwargs.get("use_vars", {'avar': True, 'hvar': True, 'UVvar': True})
self.use_consts = kwargs.get("use_consts", {'aconst': False, 'hconst': False, 'UVconst': False, 'Jconst': False})
train_model, cv_model, test_model = None, None, None
if self.datasets is None:
# model trained on entire dataset
train_model = model.MNEr(resp, feat, self.rank, cetype=self.cetype, citype=self.citype, rtype=self.rtype,
fscale=self.fscale["trainset"], use_vars=self.use_vars, use_consts=self.use_consts,
x_dev=self.x_dev, **kwargs)
else:
# model trained on subset of dataset
if "trainset" in self.datasets:
train_model = model.MNEr(resp[self.datasets["trainset"]], feat[self.datasets["trainset"],:], self.rank,
cetype=self.cetype, citype=self.citype, rtype=self.rtype,
fscale=self.fscale["trainset"], use_vars=self.use_vars,
use_consts=self.use_consts, x_dev=self.x_dev, **kwargs)
if "cvset" in self.datasets:
cv_model = model.MNEr(resp[self.datasets["cvset"]], feat[self.datasets["cvset"],:], self.rank,
cetype=self.cetype, citype=self.citype, fscale=self.fscale["cvset"],
use_vars=self.use_vars, use_consts=self.use_consts, x_dev=self.x_dev, **kwargs)
if "testset" in self.datasets:
test_model = model.MNEr(resp[self.datasets["testset"]], feat[self.datasets["testset"],:], self.rank,
cetype=self.cetype, citype=self.citype, fscale=self.fscale["testset"],
use_vars=self.use_vars, use_consts=self.use_consts, x_dev=self.x_dev, **kwargs)
return train_model, cv_model, test_model
def build_expressions(self, model, grad=True, hess=False, **kwargs):
"""Build Theano expressions for the objective, constraints, gradient,
Jacobians, and Hessian, if applicable, for a given model.
[inputs] (model, grad=True, hess=False, **kwargs)
model: instantiation of class MNEr from mner.model.py
grad: (optional) Boolean; if True, builds the
gradient.
hess: (optional) Boolean; if True, builds the Hessian.
"""
if model is not None:
# build cost expression (gradient and hessian, if applicable)
# note that regularization is included in the cost expression
model.cost_expr(self.x_dev)
if grad:
model.cost_grad_expr(self.x_dev)
if hess:
model.cost_hess_expr(self.x_dev)
# build equality constraints expressions (gradient and hessian, if applicable)
if model.num_lagrange_cetypes:
model.ceq_expr(self.x_dev)
if grad:
model.ceq_jaco_expr(self.x_dev)
if hess:
model.ceq_hess_expr(self.x_dev, self.lda_dev)
# build inequality constraints expressions (gradient and hessian, if applicable)
if model.num_lagrange_citypes:
model.cineq_expr(self.x_dev)
if grad:
model.cineq_jaco_expr(self.x_dev)
if hess:
model.cineq_hess_expr(self.x_dev)
def compile_expressions(self, model, grad=True, hess=False, **kwargs):
"""Compile Theano expressions into device functions for a given
model.
[inputs] (model, grad=True, hess=False, **kwargs)
model: instantiation of class MNEr from mner.model.py
grad: (optional) Boolean; if True, compiles the
gradient
hess: (optional) Boolean; if True, compiles the
Hessian
"""
if model is not None:
# compile cost function (gradient and hessian, if applicable)
# note that this cost function includes regularization
model.compile_cost(self.x_dev)
if grad:
model.compile_cost_grad(self.x_dev)
if hess:
model.compile_cost_hess(self.x_dev)
# compile equality constraints (gradient and hessian, if applicable)
if model.cetype is not None and len(model.cetype):
model.compile_ceq(self.x_dev)
if grad:
model.compile_ceq_jaco(self.x_dev)
if hess:
model.compile_ceq_hess(self.x_dev, self.lda_dev)
# compile inequality constraints (gradient and hessian, if applicable)
if model.citype is not None and len(model.citype):
model.compile_cineq(self.x_dev)
if grad:
model.compile_cineq_jaco(self.x_dev)
if hess:
model.compile_cineq_hess(self.x_dev, self.lda_dev)
def init_solver(self, **kwargs):
""" Initialize the solver object.
[inputs] (**kwargs)
"""
if not hasattr(self.solver, 'initialized') or self.solver.initialized == False:
self.solver = self.solver(self, **kwargs)
def compute_set(self, set_name, **kwargs):
""" Compute the objective function at the current weights.
[inputs] (set_name, **kwargs)
set_name: string with the name of the data set;
i.e. "train", "cv", "test.
[returns] fval
fval: value of the objective function evaluated on the
set_name data set.
"""
#if set_name in self.datasets:
if set_name.endswith("set"):
set_name = set_name[:-len("set")]
fval = eval("self." + set_name + "_model.cost(self.x.astype(self.float_dtype))")
return fval
def optimize(self, x0=None, **kwargs):
"""Optimize the low-rank MNE model.
[inputs] (x0=None, **kwargs)
x0: (optional) initial weight vector. If set to None,
then the class function init_vec from the class
member train_model is used.
[returns] (x, ftrain)
x: final weight vector after the optimization
completes
ftrain: value of the (regularized and constrained)
objective function at x from class member
train_model.
"""
# if initial weights are not provided, generate some
if x0 is None:
x0 = self.train_model.init_vec()
self.x, self.ftrain = self.solver.solve(x0.astype(self.float_dtype), **kwargs)
return self.x, self.ftrain
def get(self, name, default=None):
""" Get attribute, if it exists; otherwise, return default.
[inputs] (name, default=None)
name: string identifying the attribute name.
default: (optional) if attribute does not exist,
return a default value.
[returns] attr_val
attr_val: either the requested attribute identified by
name or the default, when appropriate.
"""
return getattr(self, name, default)
def __getitem__(self, name):
return self.get(name)
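# A hedged sketch of the `datasets` and `fscale` structures described in the
# docstrings above (split fractions are arbitrary; resp, feat and my_solver
# are placeholders, and a full Optimizer also needs a rank):
#
#   nsamp = resp.shape[0]
#   idx = np.random.permutation(nsamp)
#   datasets = {"trainset": np.zeros(nsamp, dtype=bool),
#               "cvset": np.zeros(nsamp, dtype=bool),
#               "testset": np.zeros(nsamp, dtype=bool)}
#   datasets["trainset"][idx[:int(0.8 * nsamp)]] = True
#   datasets["cvset"][idx[int(0.8 * nsamp):int(0.9 * nsamp)]] = True
#   datasets["testset"][idx[int(0.9 * nsamp):]] = True
#   fscale = {"trainset": -1.0, "cvset": -1.0, "testset": -1.0}  # <= 0 -> 1/nsamples
#   opt = Optimizer(resp, feat, rank=2, rtype=["nuclear-norm"],
#                   solver=my_solver, datasets=datasets, fscale=fscale)
#   x, ftrain = opt.optimize()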
|
|
#!/usr/bin/env python3
# Copyright (C) 2020-2021 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"Tests for the `btclib.tx` module."
import json
from os import path
import pytest
from btclib.exceptions import BTClibValueError
from btclib.script.witness import Witness
from btclib.tx.tx import Tx
from btclib.tx.tx_in import OutPoint, TxIn
from btclib.tx.tx_out import TxOut
def test_tx() -> None:
# default constructor
tx = Tx()
assert not tx.is_segwit()
assert not any(bool(w) for w in tx.vwitness)
assert not any(bool(tx_in.script_witness) for tx_in in tx.vin)
assert not tx.is_coinbase()
assert tx.version == 1
assert tx.lock_time == 0
assert not tx.vin
assert not tx.vout
assert tx.nVersion == tx.version
assert tx.nLockTime == tx.lock_time
tx_id = "d21633ba23f70118185227be58a63527675641ad37967e2aa461559f577aec43"
assert tx.id.hex() == tx_id
assert tx.hash == tx.id
assert tx.size == 10
assert tx.vsize == tx.size
assert tx.weight == tx.size * 4
tx_2 = Tx.from_dict(tx.to_dict())
assert tx_2.is_segwit() == tx.is_segwit()
assert tx_2 == tx
tx_2 = Tx.parse(tx.serialize(include_witness=True))
assert tx_2.is_segwit() == tx.is_segwit()
assert tx_2 == tx
tx_2 = Tx.parse(tx.serialize(include_witness=False))
assert not tx_2.is_segwit()
assert tx_2 == tx
# non-default constructor, no segwit
prev_out = OutPoint(
"9dcfdb5836ecfe146bdaa896605ba21222f83cd014dd47adde14fab2aba7de9b", 1
)
script_sig = b""
sequence = 0xFFFFFFFF
tx_in = TxIn(prev_out, script_sig, sequence)
tx_out1 = TxOut(2500000, "a914f987c321394968be164053d352fc49763b2be55c87")
tx_out2 = TxOut(
6381891, "0020701a8d401c84fb13e6baf169d59684e17abd9fa216c8cc5b9fc63d622ff8c58d"
)
version = 1
lock_time = 0
tx = Tx(version, lock_time, [tx_in], [tx_out1, tx_out2])
assert not tx.is_segwit()
assert not any(bool(w) for w in tx.vwitness)
assert not any(bool(tx_in.script_witness) for tx_in in tx.vin)
assert not tx.is_coinbase()
assert tx.version == 1
assert tx.lock_time == 0
assert len(tx.vin) == 1
assert len(tx.vout) == 2
assert tx.nVersion == tx.version
assert tx.nLockTime == tx.lock_time
tx_id = "4e52f7848dab7dd89ef7ba477939574198a170bfcb2fb34355c69f5e0169f63c"
assert tx.id.hex() == tx_id
assert tx.hash == tx.id
assert tx.size == 126
assert tx.vsize == tx.size
assert tx.weight == tx.size * 4
tx_2 = Tx.from_dict(tx.to_dict())
assert tx_2.is_segwit() == tx.is_segwit()
assert tx_2 == tx
tx_2 = Tx.parse(tx.serialize(include_witness=True))
assert tx_2.is_segwit() == tx.is_segwit()
assert tx_2 == tx
tx_2 = Tx.parse(tx.serialize(include_witness=False))
assert not tx_2.is_segwit()
assert tx_2 == tx
# non-default constructor, with segwit
version = 1
lock_time = 0
tx = Tx(version, lock_time, [tx_in], [tx_out1, tx_out2])
stack = [
"",
"30440220421fbbedf2ee096d6289b99973509809d5e09589040d5e0d453133dd11b2f78a02205686dbdb57e0c44e49421e9400dd4e931f1655332e8d078260c9295ba959e05d01",
"30440220398f141917e4525d3e9e0d1c6482cb19ca3188dc5516a3a5ac29a0f4017212d902204ea405fae3a58b1fc30c5ad8ac70a76ab4f4d876e8af706a6a7b4cd6fa100f4401",
"52210375e00eb72e29da82b89367947f29ef34afb75e8654f6ea368e0acdfd92976b7c2103a1b26313f430c4b15bb1fdce663207659d8cac749a0e53d70eff01874496feff2103c96d495bfdd5ba4145e3e046fee45e84a8a48ad05bd8dbb395c011a32cf9f88053ae",
]
tx.vin[0].script_witness = Witness(stack)
assert tx.is_segwit()
assert any(bool(w) for w in tx.vwitness)
assert any(bool(tx_in.script_witness) for tx_in in tx.vin)
assert not tx.is_coinbase()
assert tx.version == 1
assert tx.lock_time == 0
assert len(tx.vin) == 1
assert len(tx.vout) == 2
assert tx.nVersion == tx.version
assert tx.nLockTime == tx.lock_time
tx_id = "4e52f7848dab7dd89ef7ba477939574198a170bfcb2fb34355c69f5e0169f63c"
assert tx.id.hex() == tx_id
hash_ = "d39eb3e3954be4bdc0b3be2d980124b1e1e11fb414b886b52939b07d95a58a8f"
assert tx.hash.hex() == hash_
assert tx.size == 380
assert tx.vsize == 190
assert tx.weight == 758
tx_2 = Tx.from_dict(tx.to_dict())
assert tx_2.is_segwit() == tx.is_segwit()
assert tx_2 == tx
tx_2 = Tx.parse(tx.serialize(include_witness=True))
assert tx_2.is_segwit() == tx.is_segwit()
assert tx_2 == tx
tx_2 = Tx.parse(tx.serialize(include_witness=False))
assert not tx_2.is_segwit()
assert tx_2 != tx
def test_exceptions() -> None:
tx_bytes = "010000000001019bdea7abb2fa14dead47dd14d03cf82212a25b6096a8da6b14feec3658dbcf9d0100000000ffffffff02a02526000000000017a914f987c321394968be164053d352fc49763b2be55c874361610000000000220020701a8d401c84fb13e6baf169d59684e17abd9fa216c8cc5b9fc63d622ff8c58d04004730440220421fbbedf2ee096d6289b99973509809d5e09589040d5e0d453133dd11b2f78a02205686dbdb57e0c44e49421e9400dd4e931f1655332e8d078260c9295ba959e05d014730440220398f141917e4525d3e9e0d1c6482cb19ca3188dc5516a3a5ac29a0f4017212d902204ea405fae3a58b1fc30c5ad8ac70a76ab4f4d876e8af706a6a7b4cd6fa100f44016952210375e00eb72e29da82b89367947f29ef34afb75e8654f6ea368e0acdfd92976b7c2103a1b26313f430c4b15bb1fdce663207659d8cac749a0e53d70eff01874496feff2103c96d495bfdd5ba4145e3e046fee45e84a8a48ad05bd8dbb395c011a32cf9f88053ae00000000"
tx = Tx.parse(tx_bytes)
tx.version = 0
with pytest.raises(BTClibValueError, match="invalid version: "):
tx.assert_valid()
tx = Tx.parse(tx_bytes)
tx.version = 0xFFFFFFFF + 1
with pytest.raises(BTClibValueError, match="invalid version: "):
tx.assert_valid()
tx = Tx.parse(tx_bytes)
tx.lock_time = 0xFFFFFFFF + 1
with pytest.raises(BTClibValueError, match="invalid lock time: "):
tx.assert_valid()
def test_standard() -> None:
tx_bytes = "010000000001019bdea7abb2fa14dead47dd14d03cf82212a25b6096a8da6b14feec3658dbcf9d0100000000ffffffff02a02526000000000017a914f987c321394968be164053d352fc49763b2be55c874361610000000000220020701a8d401c84fb13e6baf169d59684e17abd9fa216c8cc5b9fc63d622ff8c58d04004730440220421fbbedf2ee096d6289b99973509809d5e09589040d5e0d453133dd11b2f78a02205686dbdb57e0c44e49421e9400dd4e931f1655332e8d078260c9295ba959e05d014730440220398f141917e4525d3e9e0d1c6482cb19ca3188dc5516a3a5ac29a0f4017212d902204ea405fae3a58b1fc30c5ad8ac70a76ab4f4d876e8af706a6a7b4cd6fa100f44016952210375e00eb72e29da82b89367947f29ef34afb75e8654f6ea368e0acdfd92976b7c2103a1b26313f430c4b15bb1fdce663207659d8cac749a0e53d70eff01874496feff2103c96d495bfdd5ba4145e3e046fee45e84a8a48ad05bd8dbb395c011a32cf9f88053ae00000000"
tx = Tx.parse(tx_bytes)
tx.version = 0xFFFFFFFF + 1
with pytest.raises(BTClibValueError, match="invalid version: "):
tx.assert_standard()
tx = Tx.parse(tx_bytes)
tx.version = 0xFFFFFFFF
tx.assert_valid()
tx = Tx.parse(tx_bytes)
tx.version = 0xFFFFFFFF
with pytest.raises(BTClibValueError, match="invalid version: "):
tx.assert_standard()
def test_coinbase_block_1() -> None:
coinbase_out = "00f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"
tx_out = TxOut.parse(coinbase_out)
assert tx_out.serialize().hex() == coinbase_out
coinbase_inp = ( # prev_out
"0000000000000000000000000000000000000000000000000000000000000000ffffffff"
"0704ffff001d0104" # script_sig
"ffffffff" # sequence
)
tx_in = TxIn.parse(coinbase_inp)
assert tx_in.serialize().hex() == coinbase_inp
assert tx_in.prev_out.is_coinbase
coinbase = "01000000" "01" + coinbase_inp + "01" + coinbase_out + "00000000"
tx = Tx.parse(coinbase)
assert tx.serialize(include_witness=True).hex() == coinbase
assert tx == Tx.from_dict(tx.to_dict())
assert tx.version == 1
assert tx.lock_time == 0
assert len(tx.vin) == 1
assert len(tx.vout) == 1
assert tx.vin[0].script_sig == tx_in.script_sig
assert tx.vout[0].script_pub_key == tx_out.script_pub_key
tx_id = "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098"
assert tx.id.hex() == tx_id
assert tx.id == tx.hash
assert tx.size == 134
assert tx.vsize == tx.size
assert tx.weight == tx.size * 4
assert not tx.is_segwit()
assert not any(bool(w) for w in tx.vwitness)
assert not any(bool(tx_in.script_witness) for tx_in in tx.vin)
assert tx.is_coinbase()
# https://en.bitcoin.it/wiki/Protocol_documentation#tx
def test_wiki_transaction() -> None:
tx_bytes = "01000000016dbddb085b1d8af75184f0bc01fad58d1266e9b63b50881990e4b40d6aee3629000000008b483045022100f3581e1972ae8ac7c7367a7a253bc1135223adb9a468bb3a59233f45bc578380022059af01ca17d00e41837a1d58e97aa31bae584edec28d35bd96923690913bae9a0141049c02bfc97ef236ce6d8fe5d94013c721e915982acd2b12b65d9b7d59e20a842005f8fc4e02532e873d37b96f09d6d4511ada8f14042f46614a4c70c0f14beff5ffffffff02404b4c00000000001976a9141aa0cd1cbea6e7458a7abad512a9d9ea1afb225e88ac80fae9c7000000001976a9140eab5bea436a0484cfab12485efda0b78b4ecc5288ac00000000"
tx = Tx.parse(tx_bytes)
assert tx.serialize(include_witness=True).hex() == tx_bytes
assert tx == Tx.from_dict(tx.to_dict())
assert tx.version == 1
assert tx.lock_time == 0
assert len(tx.vin) == 1
assert len(tx.vout) == 2
assert tx.vout[0].value == 5000000
assert tx.vout[1].value == 3354000000
tx_id = "d4a73f51ab7ee7acb4cf0505d1fab34661666c461488e58ec30281e2becd93e2"
assert tx.id.hex() == tx_id
assert tx.hash == tx.id
assert tx.size == 258
assert tx.vsize == tx.size
assert tx.weight == tx.size * 4
assert not tx.is_segwit()
assert not any(bool(w) for w in tx.vwitness)
assert not any(bool(tx_in.script_witness) for tx_in in tx.vin)
assert not tx.is_coinbase()
def test_single_witness() -> None:
# 4e52f7848dab7dd89ef7ba477939574198a170bfcb2fb34355c69f5e0169f63c
tx_bytes = "010000000001019bdea7abb2fa14dead47dd14d03cf82212a25b6096a8da6b14feec3658dbcf9d0100000000ffffffff02a02526000000000017a914f987c321394968be164053d352fc49763b2be55c874361610000000000220020701a8d401c84fb13e6baf169d59684e17abd9fa216c8cc5b9fc63d622ff8c58d04004730440220421fbbedf2ee096d6289b99973509809d5e09589040d5e0d453133dd11b2f78a02205686dbdb57e0c44e49421e9400dd4e931f1655332e8d078260c9295ba959e05d014730440220398f141917e4525d3e9e0d1c6482cb19ca3188dc5516a3a5ac29a0f4017212d902204ea405fae3a58b1fc30c5ad8ac70a76ab4f4d876e8af706a6a7b4cd6fa100f44016952210375e00eb72e29da82b89367947f29ef34afb75e8654f6ea368e0acdfd92976b7c2103a1b26313f430c4b15bb1fdce663207659d8cac749a0e53d70eff01874496feff2103c96d495bfdd5ba4145e3e046fee45e84a8a48ad05bd8dbb395c011a32cf9f88053ae00000000"
tx = Tx.parse(tx_bytes)
assert tx.serialize(include_witness=True).hex() == tx_bytes
assert tx == Tx.from_dict(tx.to_dict())
assert tx.version == 1
assert tx.lock_time == 0
assert len(tx.vin) == 1
assert len(tx.vout) == 2
stack = [
"",
"30440220421fbbedf2ee096d6289b99973509809d5e09589040d5e0d453133dd11b2f78a02205686dbdb57e0c44e49421e9400dd4e931f1655332e8d078260c9295ba959e05d01",
"30440220398f141917e4525d3e9e0d1c6482cb19ca3188dc5516a3a5ac29a0f4017212d902204ea405fae3a58b1fc30c5ad8ac70a76ab4f4d876e8af706a6a7b4cd6fa100f4401",
"52210375e00eb72e29da82b89367947f29ef34afb75e8654f6ea368e0acdfd92976b7c2103a1b26313f430c4b15bb1fdce663207659d8cac749a0e53d70eff01874496feff2103c96d495bfdd5ba4145e3e046fee45e84a8a48ad05bd8dbb395c011a32cf9f88053ae",
]
witness = Witness(stack)
assert tx.vin[0].script_witness == witness
tx_id = "4e52f7848dab7dd89ef7ba477939574198a170bfcb2fb34355c69f5e0169f63c"
assert tx.id.hex() == tx_id
hash_ = "d39eb3e3954be4bdc0b3be2d980124b1e1e11fb414b886b52939b07d95a58a8f"
assert tx.hash.hex() == hash_
assert tx.size == 380
assert tx.vsize == 190
assert tx.weight == 758
assert tx.is_segwit()
assert any(bool(w) for w in tx.vwitness)
assert not tx.is_coinbase()
def test_double_witness() -> None:
tx_bytes = "01000000000102322d4f05c3a4f78e97deda01bd8fc5ff96777b62c8f2daa72b02b70fa1e3e1051600000017160014e123a5263695be634abf3ad3456b4bf15f09cc6afffffffffdfee6e881f12d80cbcd6dc54c3fe390670678ebd26c3ae2dd129f41882e3efc25000000171600145946c8c3def6c79859f01b34ad537e7053cf8e73ffffffff02c763ac050000000017a9145ffd6df9bd06dedb43e7b72675388cbfc883d2098727eb180a000000001976a9145f9e96f739198f65d249ea2a0336e9aa5aa0c7ed88ac024830450221009b364c1074c602b2c5a411f4034573a486847da9c9c2467596efba8db338d33402204ccf4ac0eb7793f93a1b96b599e011fe83b3e91afdc4c7ab82d765ce1da25ace01210334d50996c36638265ad8e3cd127506994100dd7f24a5828155d531ebaf736e160247304402200c6dd55e636a2e4d7e684bf429b7800a091986479d834a8d462fbda28cf6f8010220669d1f6d963079516172f5061f923ef90099136647b38cc4b3be2a80b820bdf90121030aa2a1c2344bc8f38b7a726134501a2a45db28df8b4bee2df4428544c62d731400000000"
tx = Tx.parse(tx_bytes)
assert tx.serialize(include_witness=True).hex() == tx_bytes
assert tx == Tx.from_dict(tx.to_dict())
assert tx.version == 1
assert tx.lock_time == 0
assert len(tx.vin) == 2
assert len(tx.vout) == 2
stack1 = [
"30450221009b364c1074c602b2c5a411f4034573a486847da9c9c2467596efba8db338d33402204ccf4ac0eb7793f93a1b96b599e011fe83b3e91afdc4c7ab82d765ce1da25ace01",
"0334d50996c36638265ad8e3cd127506994100dd7f24a5828155d531ebaf736e16",
]
witness1 = Witness(stack1)
assert tx.vin[0].script_witness == witness1
stack2 = [
"304402200c6dd55e636a2e4d7e684bf429b7800a091986479d834a8d462fbda28cf6f8010220669d1f6d963079516172f5061f923ef90099136647b38cc4b3be2a80b820bdf901",
"030aa2a1c2344bc8f38b7a726134501a2a45db28df8b4bee2df4428544c62d7314",
]
witness2 = Witness(stack2)
assert tx.vin[1].script_witness == witness2
tx_id = "a4b76807519aba5740f7865396bc4c5ca0eb8aa7c3744ca2db88fcc9e345424c"
assert tx.id.hex() == tx_id
hash_ = "0936cb8dba90e11345b9c05f457f139ddce4a5329701af4708b2cf4a02d75adb"
assert tx.hash.hex() == hash_
assert tx.size == 421
assert tx.vsize == 259
assert tx.weight == 1033
assert tx.is_segwit()
assert any(bool(w) for w in tx.vwitness)
assert any(bool(tx_in.script_witness) for tx_in in tx.vin)
assert not tx.is_coinbase()
def test_dataclasses_json_dict() -> None:
fname = "d4f3c2c3c218be868c77ae31bedb497e2f908d6ee5bbbe91e4933e6da680c970.bin"
filename = path.join(path.dirname(__file__), "_data", fname)
with open(filename, "rb") as binary_file_:
tx = Tx.parse(binary_file_.read())
# Tx dataclass
assert isinstance(tx, Tx)
assert tx.is_segwit()
assert any(bool(w) for w in tx.vwitness)
assert any(bool(tx_in.script_witness) for tx_in in tx.vin)
assert tx.vin[0].script_witness
assert tx.vin[0].script_witness.stack
# Tx dataclass to dict
tx_dict = tx.to_dict()
assert isinstance(tx_dict, dict)
assert tx_dict["vin"][0]["txinwitness"]["stack"] # type: ignore
# Tx dataclass dict to file
datadir = path.join(path.dirname(__file__), "_generated_files")
filename = path.join(datadir, "tx.json")
with open(filename, "w", encoding="ascii") as file_:
json.dump(tx_dict, file_, indent=4)
# Tx dataclass dict from file
with open(filename, "r", encoding="ascii") as file_:
tx_dict2 = json.load(file_)
assert isinstance(tx_dict2, dict)
assert tx_dict2["vin"][0]["txinwitness"]["stack"] # type: ignore
assert tx_dict == tx_dict2
# Tx dataclass from dict
tx2 = Tx.from_dict(tx_dict)
assert isinstance(tx2, Tx)
assert tx.vin[0] == tx2.vin[0]
assert tx2.vin[0].script_witness
assert tx2.vin[0].script_witness.stack
assert tx2.is_segwit()
assert any(bool(w) for w in tx2.vwitness)
assert any(bool(tx_in.script_witness) for tx_in in tx2.vin)
assert tx == tx2
|
|
#! /usr/bin/env python
#
# ipg.py -- Module for simple FITS viewer in an HTML5 canvas web browser.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
This example illustrates using a Ginga widget in a web browser. All the
rendering is done on the server side and the browser only acts as a display
front end. Using this you could create an analysis-type environment on a
server and view it via a browser.
See example usage with an ipython notebook at:
https://gist.github.com/ejeschke/6067409
You will need a reasonably modern web browser with HTML5 canvas support.
Tested with Chromium 41.0.2272.76, Firefox 37.0.2, Safari 7.1.6
"""
from __future__ import print_function
import sys, os
import logging
import threading
import tornado.web
import tornado.ioloop
from ginga import AstroImage, colors
from ginga.canvas.CanvasObject import get_canvas_types
from ginga.misc import log, Task
from ginga.util import catalog, iohelper
from ginga.web.pgw import templates, js, PgHelp, Widgets, Viewers
class EnhancedCanvasView(Viewers.CanvasView):
def embed(self, width=600, height=650):
"""
Embed a viewer into a Jupyter notebook.
"""
from IPython.display import IFrame
return IFrame(self.url, width, height)
def open(self, new=1):
"""
Open this viewer in a new browser window or tab.
(requires `webbrowser` module)
"""
import webbrowser
webbrowser.open(self.url, new=new)
def show(self):
"""
Capture the window of a viewer.
"""
from IPython.display import Image
return Image(data=bytes(self.get_rgb_image_as_bytes(format='png')),
format='png', embed=True)
def load_fits(self, filepath):
"""
Load a FITS file into the viewer.
"""
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
self.set_image(image)
load = load_fits
def load_hdu(self, hdu):
"""
Load an HDU into the viewer.
"""
image = AstroImage.AstroImage(logger=self.logger)
image.load_hdu(hdu)
self.set_image(image)
def load_data(self, data_np):
"""
Load raw numpy data into the viewer.
"""
image = AstroImage.AstroImage(logger=self.logger)
image.set_data(data_np)
self.set_image(image)
def add_canvas(self, tag=None):
# add a canvas to the view
my_canvas = self.get_canvas()
DrawingCanvas = my_canvas.get_draw_class('drawingcanvas')
canvas = DrawingCanvas()
# enable drawing on the canvas
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.ui_setActive(True)
canvas.set_surface(self)
canvas.register_for_cursor_drawing(self)
# add the canvas to the view.
my_canvas.add(canvas, tag=tag)
return canvas
def resize(self, width, height):
# this shouldn't be needed
self.configure_window(width, height)
self.pgcanvas.resize(width, height)
# hack to force a browser reload
app = self.pgcanvas.get_app()
app.do_operation('reload_page', id=self.pgcanvas.id)
class ImageViewer(object):
def __init__(self, logger, window, viewer_class=None):
if viewer_class is None:
viewer_class = EnhancedCanvasView
self.logger = logger
self.url = window.url
self.drawcolors = colors.get_colors()
self.dc = get_canvas_types()
self.top = window
self.top.add_callback('close', self.closed)
vbox = Widgets.VBox()
vbox.set_border_width(2)
vbox.set_spacing(1)
fi = viewer_class(logger)
fi.url = self.url
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.set_zoom_algorithm('rate')
fi.set_zoomrate(1.4)
fi.show_pan_mark(True)
fi.set_callback('drag-drop', self.drop_file)
fi.set_callback('none-move', self.motion)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_setActive(True)
self.fitsimage = fi
bd = fi.get_bindings()
bd.enable_all(True)
        # so trackpad scrolling can be adjusted
        settings = bd.get_settings()
        settings.set(scroll_zoom_direct_scale=True,
                     scroll_zoom_acceleration=0.07)
# canvas that we will draw on
canvas = self.dc.DrawingCanvas()
#canvas.enable_draw(True)
#canvas.enable_edit(True)
#canvas.set_drawtype('rectangle', color='lightblue')
canvas.setSurface(fi)
self.canvas = canvas
# add canvas to view
private_canvas = fi.get_canvas()
private_canvas.add(canvas)
canvas.ui_setActive(True)
#canvas.register_for_cursor_drawing(fi)
fi.set_canvas(canvas)
## self.drawtypes = canvas.get_drawtypes()
## self.drawtypes.sort()
# add a color bar
private_canvas.add(self.dc.ColorBar(side='bottom', offset=10))
# add little mode indicator that shows modal states in
# the corner
private_canvas.add(self.dc.ModeIndicator(corner='ur', fontsize=14))
# little hack necessary to get correct operation of the mode indicator
# in all circumstances
bm = fi.get_bindmap()
bm.add_callback('mode-set', lambda *args: fi.redraw(whence=3))
fi.set_desired_size(512, 512)
w = Viewers.GingaViewerWidget(viewer=fi)
vbox.add_widget(w, stretch=1)
self.readout = Widgets.Label("")
vbox.add_widget(self.readout, stretch=0)
hbox = Widgets.HBox()
hbox.add_widget(Widgets.Label('Zoom sensitivity: '))
slider = Widgets.Slider(orientation='horizontal', dtype=float)
slider.add_callback('value-changed',
lambda w, val: self.adjust_scrolling_accel_cb(val))
slider.set_limits(0.0, 12.0, 0.005)
slider.set_value(8.0)
hbox.add_widget(slider, stretch=1)
hbox.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(hbox, stretch=0)
# need to put this in an hbox with an expanding label or the
# browser wants to resize the canvas, distorting it
hbox = Widgets.HBox()
hbox.add_widget(vbox, stretch=0)
hbox.add_widget(Widgets.Label(''), stretch=1)
self.top.set_widget(hbox)
def load_file(self, filepath):
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
self.fitsimage.set_image(image)
self.top.set_title(filepath)
def drop_file(self, fitsimage, paths):
fileName = paths[0]
self.load_file(fileName)
def motion(self, viewer, button, data_x, data_y):
# Get the value under the data coordinates
try:
#value = viewer.get_data(data_x, data_y)
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = viewer.get_data(int(data_x+0.5), int(data_y+0.5))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
# Calculate WCS RA
try:
# NOTE: image function operates on DATA space coords
image = viewer.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.set_text(text)
def adjust_scrolling_accel_cb(self, val):
def f(x):
return (1.0 / 2.0**(10.0-x))
val2 = f(val)
self.logger.debug("slider value is %f, setting will be %f" % (val, val2))
settings = self.fitsimage.get_bindings().get_settings()
settings.set(scroll_zoom_acceleration=val2)
return True
def closed(self, w):
self.logger.info("Top window closed.")
self.top = None
sys.exit()
class FileHandler(tornado.web.RequestHandler):
def initialize(self, name, factory):
self.name = name
self.viewer_factory = factory
self.logger = factory.logger
self.logger.info("filehandler initialize")
def get(self):
self.logger.info("filehandler get")
# Collect arguments
wid = self.get_argument('id', None)
        # Get window with this id (the factory holds the application object)
        window = self.viewer_factory.app.get_window(wid)
output = window.render()
self.write(output)
class ViewerFactory(object):
def __init__(self, logger, basedir, app, thread_pool):
"""
Constructor parameters:
`logger` : a logging-module compatible logger object
`basedir`: directory to which paths requested on the viewer
are considered relative to.
"""
self.logger = logger
self.basedir = basedir
self.app = app
self.thread_pool = thread_pool
# dict of viewers
self.viewers = {}
def get_basedir(self):
return self.basedir
def get_threadpool(self):
return self.thread_pool
def get_viewer(self, v_id):
"""
Get an existing viewer by viewer id. If the viewer does not yet
exist, make a new one.
"""
try:
return self.viewers[v_id]
except KeyError:
pass
# create top level window
window = self.app.make_window("Viewer %s" % v_id, wid=v_id)
# our own viewer object, customized with methods (see above)
viewer = ImageViewer(self.logger, window)
#viewer.url = window.url
self.viewers[v_id] = viewer
return viewer
class WebServer(object):
def __init__(self, app, thread_pool, factory,
host='localhost', port=9909, ev_quit=None):
self.host = host
self.port = port
self.app = app
self.logger = app.logger
self.thread_pool = thread_pool
self.factory = factory
if ev_quit is None:
ev_quit = threading.Event()
self.ev_quit = ev_quit
self.server = None
def start(self, use_thread=True, no_ioloop=False):
self.thread_pool.startall()
js_path = os.path.dirname(js.__file__)
self.server = tornado.web.Application([
(r"/js/(.*\.js)", tornado.web.StaticFileHandler,
{"path": js_path}),
(r"/viewer", FileHandler,
dict(name='Ginga', factory=self.factory)),
(r"/app", PgHelp.WindowHandler,
dict(name='Application', url='/app', app=self.app)),
(r"/app/socket", PgHelp.ApplicationHandler,
dict(name='Ginga', app=self.app)),
## ("/viewer/socket", ViewerWidget,
## dict(name='Ginga', factory=self.factory)),
],
factory=self.factory, logger=self.logger)
self.server.listen(self.port, self.host)
if no_ioloop:
self.t_ioloop = None
else:
self.t_ioloop = tornado.ioloop.IOLoop.instance()
if use_thread:
task = Task.FuncTask2(self.t_ioloop.start)
self.thread_pool.addTask(task)
else:
self.t_ioloop.start()
def stop(self):
# how to stop tornado server?
        if self.t_ioloop is not None:
self.t_ioloop.stop()
self.thread_pool.stopall()
self.ev_quit.set()
def get_viewer(self, v_id):
from IPython.display import display, HTML
v = self.factory.get_viewer(v_id)
url = v.top.url
viewer = v.fitsimage
viewer.url = url
#display(HTML('<a href="%s">link to viewer</a>' % url))
return viewer
def make_server(logger=None, basedir='.', numthreads=5,
host='localhost', port=9909, use_opencv=False):
if logger is None:
logger = log.get_logger("ipg", null=True)
ev_quit = threading.Event()
if use_opencv:
from ginga import trcalc
try:
trcalc.use('opencv')
except Exception as e:
logger.warning("Error using opencv: %s" % str(e))
thread_pool = Task.ThreadPool(numthreads, logger,
ev_quit=ev_quit)
base_url = "http://%s:%d/app" % (host, port)
app = Widgets.Application(logger=logger, base_url=base_url,
host=host, port=port)
factory = ViewerFactory(logger, basedir, app, thread_pool)
server = WebServer(app, thread_pool, factory,
host=host, port=port)
return server
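# Hedged example of interactive use from an IPython/Jupyter session (the
# viewer id and FITS file name below are placeholders):
#
#   server = make_server(host='localhost', port=9909)
#   server.start(use_thread=True)
#   v = server.get_viewer('v1')        # returns an EnhancedCanvasView
#   v.load_fits('example.fits')
#   v.embed(width=600, height=650)     # inline IFrame in the notebook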
def main(options, args):
logger = log.get_logger("ipg", options=options)
server = make_server(logger=logger, basedir=options.basedir,
numthreads=options.numthreads, host=options.host,
port=options.port, use_opencv=options.use_opencv)
try:
server.start(use_thread=False)
except KeyboardInterrupt:
logger.info("Interrupted!")
server.stop()
logger.info("Server terminating...")
if __name__ == "__main__":
# Parse command line options with nifty optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage, version=('%%prog'))
optprs.add_option("-d", "--basedir", dest="basedir", metavar="DIR",
default=".",
help="Directory which is at the base of file open requests")
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--host", dest="host", metavar="HOST",
default="localhost",
help="HOST used to decide which interfaces to listen on")
optprs.add_option("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
optprs.add_option("--loglevel", dest="loglevel", metavar="LEVEL",
type='int', default=logging.INFO,
help="Set logging level to LEVEL")
optprs.add_option("--numthreads", dest="numthreads", type="int",
default=5, metavar="NUM",
help="Start NUM threads in thread pool")
optprs.add_option("--stderr", dest="logstderr", default=False,
action="store_true",
help="Copy logging also to stderr")
optprs.add_option("--opencv", dest="use_opencv", default=False,
action="store_true",
help="Use OpenCv acceleration")
optprs.add_option("-p", "--port", dest="port",
type='int', default=9909, metavar="PORT",
help="Default PORT to use for the web socket")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
(options, args) = optprs.parse_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reshape Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.reshape import Reshape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class _ReshapeBijectorTest(object):
"""Base class for testing the reshape transformation.
Methods defined in this class call a method self.build_shapes() that
is implemented by subclasses defined below, returning respectively
ReshapeBijectorTestStatic: static shapes,
ReshapeBijectorTestDynamic: shape placeholders of known ndims, and
ReshapeBijectorTestDynamicNdims: shape placeholders of unspecified ndims,
so that each test in this base class is automatically run over all
three cases. The subclasses also implement assertRaisesError to test
for either Python exceptions (in the case of static shapes) or
TensorFlow op errors (dynamic shapes).
"""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
"""Do a basic sanity check of forward, inverse, jacobian."""
expected_x = np.random.randn(4, 3, 2)
expected_y = np.reshape(expected_x, [4, 6])
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([3, 2], [6,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
fldj_,
ildj_) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x, event_ndims=2),
bijector.inverse_log_det_jacobian(expected_y, event_ndims=2),
), feed_dict=feed_dict)
self.assertEqual("reshape", bijector.name)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(0., fldj_, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj_, rtol=1e-6, atol=0)
def testEventShapeTensor(self):
"""Test event_shape_tensor methods when even ndims may be dynamic."""
shape_in_static = [2, 3]
shape_out_static = [6,]
shape_in, shape_out, feed_dict = self.build_shapes(shape_in_static,
shape_out_static)
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in, validate_args=True)
# using the _tensor methods, we should always get a fully-specified
# result since these are evaluated at graph runtime.
with self.cached_session() as sess:
(shape_out_,
shape_in_) = sess.run((
bijector.forward_event_shape_tensor(shape_in),
bijector.inverse_event_shape_tensor(shape_out),
), feed_dict=feed_dict)
self.assertAllEqual(shape_out_static, shape_out_)
self.assertAllEqual(shape_in_static, shape_in_)
def testScalarReshape(self):
"""Test reshaping to and from a scalar shape ()."""
expected_x = np.random.randn(4, 3, 1)
expected_y = np.reshape(expected_x, [4, 3])
expected_x_scalar = np.random.randn(1,)
expected_y_scalar = expected_x_scalar[0]
shape_in, shape_out, feed_dict = self.build_shapes([], [1,])
with self.cached_session() as sess:
bijector = Reshape(
event_shape_out=shape_in,
event_shape_in=shape_out, validate_args=True)
(x_,
y_,
x_scalar_,
y_scalar_
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.inverse(expected_y_scalar),
bijector.forward(expected_x_scalar),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(expected_y_scalar, y_scalar_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x_scalar, x_scalar_, rtol=1e-6, atol=0)
def testMultipleUnspecifiedDimensionsOpError(self):
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [4, -1, -1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(
"elements must have at most one `-1`."):
sess.run(bijector.forward_event_shape_tensor(shape_in),
feed_dict=feed_dict)
# pylint: disable=invalid-name
def _testInvalidDimensionsOpError(self, expected_error_message):
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 2, -2,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.forward_event_shape_tensor(shape_in),
feed_dict=feed_dict)
# pylint: enable=invalid-name
def testValidButNonMatchingInputOpError(self):
x = np.random.randn(4, 3, 2)
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 6, 1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
      # Here we pass in a tensor (x) whose shape is compatible with
      # the output shape, so tf.reshape will throw no error, but whose
      # shape doesn't match the expected input shape.
with self.assertRaisesError(
"Input `event_shape` does not match `event_shape_in`."):
sess.run(bijector.forward(x),
feed_dict=feed_dict)
def testValidButNonMatchingInputPartiallySpecifiedOpError(self):
x = np.random.randn(4, 3, 2)
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, -1], [1, 6, 1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(
"Input `event_shape` does not match `event_shape_in`."):
sess.run(bijector.forward(x),
feed_dict=feed_dict)
# pylint: disable=invalid-name
def _testInputOutputMismatchOpError(self, expected_error_message):
x1 = np.random.randn(4, 2, 3)
x2 = np.random.randn(4, 1, 1, 5)
with self.cached_session() as sess:
shape_in, shape_out, fd_mismatched = self.build_shapes([2, 3],
[1, 1, 5])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.forward(x1), feed_dict=fd_mismatched)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.inverse(x2), feed_dict=fd_mismatched)
# pylint: enable=invalid-name
def testOneShapePartiallySpecified(self):
expected_x = np.random.randn(4, 6)
expected_y = np.reshape(expected_x, [4, 2, 3])
with self.cached_session() as sess:
# one of input/output shapes is partially specified
shape_in, shape_out, feed_dict = self.build_shapes([-1,], [2, 3])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testBothShapesPartiallySpecified(self):
expected_x = np.random.randn(4, 2, 3)
expected_y = np.reshape(expected_x, [4, 3, 2])
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([-1, 3], [-1, 2])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testDefaultVectorShape(self):
expected_x = np.random.randn(4, 4)
expected_y = np.reshape(expected_x, [4, 2, 2])
with self.cached_session() as sess:
_, shape_out, feed_dict = self.build_shapes([-1,], [-1, 2])
bijector = Reshape(shape_out,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def build_shapes(self, *args, **kwargs):
raise NotImplementedError("Subclass failed to implement `build_shapes`.")
class ReshapeBijectorTestStatic(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_static = shape_in
shape_out_static = shape_out
feed_dict = {}
return shape_in_static, shape_out_static, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesRegexp(Exception, msg)
def testEventShape(self):
shape_in_static = tensor_shape.TensorShape([2, 3])
shape_out_static = tensor_shape.TensorShape([6,])
bijector = Reshape(
event_shape_out=shape_out_static,
event_shape_in=shape_in_static, validate_args=True)
# test that forward_ and inverse_event_shape do sensible things
# when shapes are statically known.
self.assertEqual(
bijector.forward_event_shape(shape_in_static),
shape_out_static)
self.assertEqual(
bijector.inverse_event_shape(shape_out_static),
shape_in_static)
def testBijectiveAndFinite(self):
x = np.random.randn(4, 2, 3)
y = np.reshape(x, [4, 1, 2, 3])
with self.cached_session():
bijector = Reshape(
event_shape_in=[2, 3],
event_shape_out=[1, 2, 3],
validate_args=True)
assert_bijective_and_finite(
bijector, x, y, event_ndims=2, rtol=1e-6, atol=0)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"Invalid value in tensor used for shape: -2")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Cannot reshape a tensor with")
class ReshapeBijectorTestDynamic(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_ph = array_ops.placeholder(shape=(len(shape_in),),
dtype=dtypes.int32)
shape_out_ph = array_ops.placeholder(shape=(len(shape_out),),
dtype=dtypes.int32)
feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}
return shape_in_ph, shape_out_ph, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"elements must be either positive integers or `-1`.")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
class ReshapeBijectorTestDynamicNdims(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)
shape_out_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)
feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}
return shape_in_ph, shape_out_ph, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"elements must be either positive integers or `-1`.")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
if __name__ == "__main__":
test.main()
|
|
from otp.ai.AIBaseGlobal import *
from direct.distributed.ClockDelta import *
import DistributedBossCogAI
from direct.directnotify import DirectNotifyGlobal
from otp.avatar import DistributedAvatarAI
import DistributedSuitAI
from toontown.battle import BattleExperienceAI
from direct.fsm import FSM
from toontown.toonbase import ToontownGlobals
from toontown.toon import InventoryBase
from toontown.toonbase import TTLocalizer
from toontown.battle import BattleBase
from toontown.toon import NPCToons
from toontown.building import SuitBuildingGlobals
import SuitDNA
import random
from toontown.coghq import DistributedLawbotBossGavelAI
from toontown.suit import DistributedLawbotBossSuitAI
from toontown.coghq import DistributedLawbotCannonAI
from toontown.coghq import DistributedLawbotChairAI
from toontown.toonbase import ToontownBattleGlobals
class DistributedLawbotBossAI(DistributedBossCogAI.DistributedBossCogAI, FSM.FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBossAI')
limitHitCount = 6
hitCountDamage = 35
numPies = 10
maxToonLevels = 77
def __init__(self, air):
DistributedBossCogAI.DistributedBossCogAI.__init__(self, air, 'l')
FSM.FSM.__init__(self, 'DistributedLawbotBossAI')
self.lawyers = []
self.cannons = None
self.chairs = None
self.gavels = None
self.cagedToonNpcId = random.choice(NPCToons.npcFriends.keys())
self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage
self.recoverRate = 0
self.recoverStartTime = 0
self.bossDamage = ToontownGlobals.LawbotBossInitialDamage
self.useCannons = 1
self.numToonJurorsSeated = 0
self.cannonBallsLeft = {}
self.toonLevels = 0
if 'Defeat' not in self.keyStates:
self.keyStates.append('Defeat')
self.toonupValue = 1
self.bonusState = False
self.bonusTimeStarted = 0
self.numBonusStates = 0
self.battleThreeTimeStarted = 0
self.battleThreeTimeInMin = 0
self.numAreaAttacks = 0
self.lastAreaAttackTime = 0
self.weightPerToon = {}
self.cannonIndexPerToon = {}
self.battleDifficulty = 0
return
def delete(self):
self.notify.debug('DistributedLawbotBossAI.delete')
self.__deleteBattleThreeObjects()
self.__deleteBattleTwoObjects()
taskName = self.uniqueName('clearBonus')
taskMgr.remove(taskName)
return DistributedBossCogAI.DistributedBossCogAI.delete(self)
def getHoodId(self):
return ToontownGlobals.LawbotHQ
def getCagedToonNpcId(self):
return self.cagedToonNpcId
def magicWordHit(self, damage, avId):
if self.attackCode != ToontownGlobals.BossCogDizzyNow:
self.hitBossInsides()
self.hitBoss(damage)
def hitBoss(self, bossDamage):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId in self.involvedToons, 'hitBoss from unknown avatar'):
return
self.validate(avId, bossDamage == 1, 'invalid bossDamage %s' % bossDamage)
if bossDamage < 1:
return
currState = self.getCurrentOrNextState()
if currState != 'BattleThree':
return
if bossDamage <= 12:
newWeight = self.weightPerToon.get(avId)
if newWeight:
bossDamage = newWeight
if self.bonusState and bossDamage <= 12:
bossDamage *= ToontownGlobals.LawbotBossBonusWeightMultiplier
bossDamage = min(self.getBossDamage() + bossDamage, self.bossMaxDamage)
self.b_setBossDamage(bossDamage, 0, 0)
if self.bossDamage >= self.bossMaxDamage:
self.b_setState('Victory')
else:
self.__recordHit()
def healBoss(self, bossHeal):
bossDamage = -bossHeal
avId = self.air.getAvatarIdFromSender()
currState = self.getCurrentOrNextState()
if currState != 'BattleThree':
return
bossDamage = min(self.getBossDamage() + bossDamage, self.bossMaxDamage)
bossDamage = max(bossDamage, 0)
self.b_setBossDamage(bossDamage, 0, 0)
if self.bossDamage == 0:
self.b_setState('Defeat')
else:
self.__recordHit()
def hitBossInsides(self):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId in self.involvedToons, 'hitBossInsides from unknown avatar'):
return
currState = self.getCurrentOrNextState()
if currState != 'BattleThree':
return
self.b_setAttackCode(ToontownGlobals.BossCogDizzyNow)
self.b_setBossDamage(self.getBossDamage(), 0, 0)
def hitToon(self, toonId):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId != toonId, 'hitToon on self'):
return
if avId not in self.involvedToons or toonId not in self.involvedToons:
return
toon = self.air.doId2do.get(toonId)
if toon:
self.healToon(toon, self.toonupValue)
self.sendUpdate('toonGotHealed', [toonId])
def touchCage(self):
avId = self.air.getAvatarIdFromSender()
currState = self.getCurrentOrNextState()
if currState != 'BattleThree' and currState != 'NearVictory':
return
if not self.validate(avId, avId in self.involvedToons, 'touchCage from unknown avatar'):
return
toon = simbase.air.doId2do.get(avId)
if toon:
toon.b_setNumPies(self.numPies)
toon.__touchedCage = 1
def touchWitnessStand(self):
self.touchCage()
def finalPieSplat(self):
self.notify.debug('finalPieSplat')
if self.state != 'NearVictory':
return
self.b_setState('Victory')
def doTaunt(self):
        if self.state != 'BattleThree':
return
tauntIndex = random.randrange(len(TTLocalizer.LawbotBossTaunts))
extraInfo = 0
if tauntIndex == 0 and self.involvedToons:
extraInfo = random.randrange(len(self.involvedToons))
self.sendUpdate('setTaunt', [tauntIndex, extraInfo])
def doNextAttack(self, task):
for lawyer in self.lawyers:
lawyer.doNextAttack(self)
self.waitForNextAttack(ToontownGlobals.LawbotBossLawyerCycleTime)
timeSinceLastAttack = globalClock.getFrameTime() - self.lastAreaAttackTime
allowedByTime = 15 < timeSinceLastAttack or self.lastAreaAttackTime == 0
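        # Area attacks are rate-limited: only allowed if at least 15 seconds
        # have passed since the last one (or none has happened yet).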
doAttack = random.randrange(1,101)
self.notify.debug('allowedByTime=%d doAttack=%d' % (allowedByTime, doAttack))
if doAttack <= ToontownGlobals.LawbotBossChanceToDoAreaAttack and allowedByTime:
self.__doAreaAttack()
self.numAreaAttacks += 1
self.lastAreaAttackTime = globalClock.getFrameTime()
else:
chanceToDoTaunt = ToontownGlobals.LawbotBossChanceForTaunt
action = random.randrange(1,101)
if action <= chanceToDoTaunt:
self.doTaunt()
pass
return
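        # NOTE: everything below this return is unreachable; attack selection
        # is now driven by the lawyers and the area-attack/taunt roll above,
        # so the attack-code chooser below appears to be leftover code.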
if self.attackCode == ToontownGlobals.BossCogDizzyNow:
attackCode = ToontownGlobals.BossCogRecoverDizzyAttack
else:
attackCode = random.choice([ToontownGlobals.BossCogAreaAttack,
ToontownGlobals.BossCogFrontAttack,
ToontownGlobals.BossCogDirectedAttack,
ToontownGlobals.BossCogDirectedAttack,
ToontownGlobals.BossCogDirectedAttack,
ToontownGlobals.BossCogDirectedAttack])
if attackCode == ToontownGlobals.BossCogAreaAttack:
self.__doAreaAttack()
elif attackCode == ToontownGlobals.BossCogDirectedAttack:
self.__doDirectedAttack()
else:
self.b_setAttackCode(attackCode)
def __doAreaAttack(self):
self.b_setAttackCode(ToontownGlobals.BossCogAreaAttack)
def __doDirectedAttack(self):
if self.nearToons:
toonId = random.choice(self.nearToons)
self.b_setAttackCode(ToontownGlobals.BossCogDirectedAttack, toonId)
else:
self.__doAreaAttack()
def b_setBossDamage(self, bossDamage, recoverRate, recoverStartTime):
self.d_setBossDamage(bossDamage, recoverRate, recoverStartTime)
self.setBossDamage(bossDamage, recoverRate, recoverStartTime)
def setBossDamage(self, bossDamage, recoverRate, recoverStartTime):
self.bossDamage = bossDamage
self.recoverRate = recoverRate
self.recoverStartTime = recoverStartTime
def getBossDamage(self):
now = globalClock.getFrameTime()
elapsed = now - self.recoverStartTime
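        # recoverRate is applied per minute of elapsed time; e.g. a rate of 10
        # with 90 seconds elapsed lowers the reported damage by 15, and the
        # result is clamped so it never drops below 0.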
return int(max(self.bossDamage - self.recoverRate * elapsed / 60.0, 0))
def d_setBossDamage(self, bossDamage, recoverRate, recoverStartTime):
timestamp = globalClockDelta.localToNetworkTime(recoverStartTime)
self.sendUpdate('setBossDamage', [bossDamage, recoverRate, timestamp])
def waitForNextStrafe(self, delayTime):
currState = self.getCurrentOrNextState()
if currState == 'BattleThree':
taskName = self.uniqueName('NextStrafe')
taskMgr.remove(taskName)
taskMgr.doMethodLater(delayTime, self.doNextStrafe, taskName)
def stopStrafes(self):
taskName = self.uniqueName('NextStrafe')
taskMgr.remove(taskName)
def doNextStrafe(self, task):
if self.attackCode != ToontownGlobals.BossCogDizzyNow:
side = random.choice([0, 1])
direction = random.choice([0, 1])
self.sendUpdate('doStrafe', [side, direction])
delayTime = 9
self.waitForNextStrafe(delayTime)
def __sendLawyerIds(self):
lawyerIds = []
for suit in self.lawyers:
lawyerIds.append(suit.doId)
self.sendUpdate('setLawyerIds', [lawyerIds])
def d_cagedToonBattleThree(self, index, avId):
self.sendUpdate('cagedToonBattleThree', [index, avId])
def formatReward(self):
return str(self.cagedToonNpcId)
def makeBattleOneBattles(self):
self.postBattleState = 'RollToBattleTwo'
self.initializeBattles(1, ToontownGlobals.LawbotBossBattleOnePosHpr)
def generateSuits(self, battleNumber):
if battleNumber == 1:
weakenedValue = ((1, 1),
(2, 2),
(2, 2),
(1, 1),
(1, 1, 1, 1, 1))
listVersion = list(SuitBuildingGlobals.SuitBuildingInfo)
if simbase.config.GetBool('lawbot-boss-cheat', 0):
listVersion[13] = weakenedValue
SuitBuildingGlobals.SuitBuildingInfo = tuple(listVersion)
return self.invokeSuitPlanner(13, 0)
else:
return self.invokeSuitPlanner(13, 1)
def removeToon(self, avId):
toon = simbase.air.doId2do.get(avId)
if toon:
toon.b_setNumPies(0)
DistributedBossCogAI.DistributedBossCogAI.removeToon(self, avId)
def enterOff(self):
self.notify.debug('enterOff')
DistributedBossCogAI.DistributedBossCogAI.enterOff(self)
self.__deleteBattleThreeObjects()
self.__resetLawyers()
def enterElevator(self):
        self.notify.debug('enterElevator')
DistributedBossCogAI.DistributedBossCogAI.enterElevator(self)
self.b_setBossDamage(ToontownGlobals.LawbotBossInitialDamage, 0, 0)
def enterIntroduction(self):
self.notify.debug('enterIntroduction')
DistributedBossCogAI.DistributedBossCogAI.enterIntroduction(self)
self.b_setBossDamage(ToontownGlobals.LawbotBossInitialDamage, 0, 0)
self.__makeChairs()
def exitIntroduction(self):
self.notify.debug('exitIntroduction')
DistributedBossCogAI.DistributedBossCogAI.exitIntroduction(self)
def enterRollToBattleTwo(self):
self.divideToons()
self.__makeCannons()
self.barrier = self.beginBarrier('RollToBattleTwo', self.involvedToons, 50, self.__doneRollToBattleTwo)
def __doneRollToBattleTwo(self, avIds):
self.b_setState('PrepareBattleTwo')
def exitRollToBattleTwo(self):
self.ignoreBarrier(self.barrier)
def enterPrepareBattleTwo(self):
self.__makeCannons()
self.barrier = self.beginBarrier('PrepareBattleTwo', self.involvedToons, 45, self.__donePrepareBattleTwo)
self.makeBattleTwoBattles()
def __donePrepareBattleTwo(self, avIds):
self.b_setState('BattleTwo')
def exitPrepareBattleTwo(self):
self.ignoreBarrier(self.barrier)
def __makeCannons(self):
if self.cannons == None:
self.cannons = []
startPt = Point3(*ToontownGlobals.LawbotBossCannonPosA)
endPt = Point3(*ToontownGlobals.LawbotBossCannonPosB)
totalDisplacement = endPt - startPt
self.notify.debug('totalDisplacement=%s' % totalDisplacement)
numToons = len(self.involvedToons)
stepDisplacement = totalDisplacement / (numToons + 1)
for index in xrange(numToons):
newPos = stepDisplacement * (index + 1)
self.notify.debug('curDisplacement = %s' % newPos)
newPos += startPt
self.notify.debug('newPos = %s' % newPos)
cannon = DistributedLawbotCannonAI.DistributedLawbotCannonAI(self.air, self, index, newPos[0], newPos[1], newPos[2], -90, 0, 0)
cannon.generateWithRequired(self.zoneId)
self.cannons.append(cannon)
return
def __makeChairs(self):
if self.chairs == None:
self.chairs = []
for index in xrange(12):
chair = DistributedLawbotChairAI.DistributedLawbotChairAI(self.air, self, index)
chair.generateWithRequired(self.zoneId)
self.chairs.append(chair)
return
def __makeBattleTwoObjects(self):
self.__makeCannons()
self.__makeChairs()
def __deleteCannons(self):
if self.cannons != None:
for cannon in self.cannons:
cannon.requestDelete()
self.cannons = None
return
def __deleteChairs(self):
if self.chairs != None:
for chair in self.chairs:
chair.requestDelete()
self.chairs = None
return
def __stopChairs(self):
if self.chairs != None:
for chair in self.chairs:
chair.stopCogs()
return
def __deleteBattleTwoObjects(self):
self.__deleteCannons()
self.__deleteChairs()
def getCannonBallsLeft(self, avId):
        if avId in self.cannonBallsLeft:
return self.cannonBallsLeft[avId]
else:
            self.notify.warning('getCannonBallsLeft invalid avId: %d' % avId)
return 0
def decrementCannonBallsLeft(self, avId):
        if avId in self.cannonBallsLeft:
self.cannonBallsLeft[avId] -= 1
if self.cannonBallsLeft[avId] < 0:
self.notify.warning('decrementCannonBallsLeft <0 cannonballs for %d' % avId)
self.cannonBallsLeft[avId] = 0
else:
self.notify.warning('decrementCannonBallsLeft invalid avId: %d' % avId)
def makeBattleTwoBattles(self):
self.postBattleState = 'RollToBattleThree'
if self.useCannons:
self.__makeBattleTwoObjects()
else:
self.initializeBattles(2, ToontownGlobals.LawbotBossBattleTwoPosHpr)
def enterBattleTwo(self):
if self.useCannons:
self.cannonBallsLeft = {}
for toonId in self.involvedToons:
self.cannonBallsLeft[toonId] = ToontownGlobals.LawbotBossCannonBallMax
for chair in self.chairs:
chair.requestEmptyJuror()
self.barrier = self.beginBarrier('BattleTwo', self.involvedToons, ToontownGlobals.LawbotBossJuryBoxMoveTime + 1, self.__doneBattleTwo)
if not self.useCannons:
if self.battleA:
self.battleA.startBattle(self.toonsA, self.suitsA)
if self.battleB:
self.battleB.startBattle(self.toonsB, self.suitsB)
def __doneBattleTwo(self, avIds):
if self.useCannons:
self.b_setState('PrepareBattleThree')
else:
self.b_setState('RollToBattleThree')
def exitBattleTwo(self):
self.resetBattles()
self.numToonJurorsSeated = 0
for chair in self.chairs:
self.notify.debug('chair.state==%s' % chair.state)
if chair.state == 'ToonJuror':
self.numToonJurorsSeated += 1
self.notify.debug('numToonJurorsSeated=%d' % self.numToonJurorsSeated)
self.air.writeServerEvent('jurorsSeated', self.doId, '%s|%s|%s' % (self.dept, self.involvedToons, self.numToonJurorsSeated))
self.__deleteCannons()
self.__stopChairs()
def enterRollToBattleThree(self):
self.divideToons()
self.barrier = self.beginBarrier('RollToBattleThree', self.involvedToons, 20, self.__doneRollToBattleThree)
def __doneRollToBattleThree(self, avIds):
self.b_setState('PrepareBattleThree')
def exitRollToBattleThree(self):
self.ignoreBarrier(self.barrier)
def enterPrepareBattleThree(self):
self.calcAndSetBattleDifficulty()
self.barrier = self.beginBarrier('PrepareBattleThree', self.involvedToons, 45, self.__donePrepareBattleThree)
def __donePrepareBattleThree(self, avIds):
self.b_setState('BattleThree')
def exitPrepareBattleThree(self):
self.ignoreBarrier(self.barrier)
def enterBattleThree(self):
self.battleThreeTimeStarted = globalClock.getFrameTime()
self.calcAndSetBattleDifficulty()
self.calculateWeightPerToon()
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
self.ammoCount = diffSettings[0]
self.numGavels = diffSettings[1]
if self.numGavels >= len(ToontownGlobals.LawbotBossGavelPosHprs):
self.numGavels = len(ToontownGlobals.LawbotBossGavelPosHprs)
self.numLawyers = diffSettings[2]
if self.numLawyers >= len(ToontownGlobals.LawbotBossLawyerPosHprs):
self.numLawyers = len(ToontownGlobals.LawbotBossLawyerPosHprs)
self.toonupValue = diffSettings[3]
self.notify.debug('diffLevel=%d ammoCount=%d gavels=%d lawyers = %d, toonup=%d' % (self.battleDifficulty,
self.ammoCount,
self.numGavels,
self.numLawyers,
self.toonupValue))
self.air.writeServerEvent('lawbotBossSettings', self.doId, '%s|%s|%s|%s|%s|%s' % (self.dept,
self.battleDifficulty,
self.ammoCount,
self.numGavels,
self.numLawyers,
self.toonupValue))
self.__makeBattleThreeObjects()
self.__makeLawyers()
self.numPies = self.ammoCount
self.resetBattles()
self.setPieType()
jurorsOver = self.numToonJurorsSeated - ToontownGlobals.LawbotBossJurorsForBalancedScale
dmgAdjust = jurorsOver * ToontownGlobals.LawbotBossDamagePerJuror
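        # Shift the starting boss damage by LawbotBossDamagePerJuror for every
        # toon juror seated above (or below) the balanced-scale count.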
self.b_setBossDamage(ToontownGlobals.LawbotBossInitialDamage + dmgAdjust, 0, 0)
if simbase.config.GetBool('lawbot-boss-cheat', 0):
self.b_setBossDamage(ToontownGlobals.LawbotBossMaxDamage - 1, 0, 0)
self.battleThreeStart = globalClock.getFrameTime()
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.__touchedCage = 0
for aGavel in self.gavels:
aGavel.turnOn()
self.waitForNextAttack(5)
self.notify.debug('battleDifficulty = %d' % self.battleDifficulty)
self.numToonsAtStart = len(self.involvedToons)
def getToonDifficulty(self):
highestCogSuitLevel = 0
totalCogSuitLevels = 0.0
totalNumToons = 0.0
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toonLevel = toon.getNumPromotions(self.dept)
totalCogSuitLevels += toonLevel
totalNumToons += 1
if toon.cogLevels > highestCogSuitLevel:
highestCogSuitLevel = toonLevel
if not totalNumToons:
totalNumToons = 1.0
averageLevel = totalCogSuitLevels / totalNumToons
self.notify.debug('toons average level = %f, highest level = %d' % (averageLevel, highestCogSuitLevel))
retval = min(averageLevel, self.maxToonLevels)
return retval
def __saySomething(self, task = None):
index = None
avId = 0
if len(self.involvedToons) == 0:
return
avId = random.choice(self.involvedToons)
toon = simbase.air.doId2do.get(avId)
        if toon and toon.__touchedCage:
if self.cagedToonDialogIndex <= TTLocalizer.CagedToonBattleThreeMaxAdvice:
index = self.cagedToonDialogIndex
self.cagedToonDialogIndex += 1
elif random.random() < 0.2:
index = random.randrange(100, TTLocalizer.CagedToonBattleThreeMaxAdvice + 1)
else:
index = random.randrange(20, TTLocalizer.CagedToonBattleThreeMaxTouchCage + 1)
if index:
self.d_cagedToonBattleThree(index, avId)
self.__saySomethingLater()
return
def __saySomethingLater(self, delayTime = 15):
taskName = self.uniqueName('CagedToonSaySomething')
taskMgr.remove(taskName)
taskMgr.doMethodLater(delayTime, self.__saySomething, taskName)
def __goodJump(self, avId):
currState = self.getCurrentOrNextState()
if currState != 'BattleThree':
return
index = random.randrange(10, TTLocalizer.CagedToonBattleThreeMaxGivePies + 1)
self.d_cagedToonBattleThree(index, avId)
self.__saySomethingLater()
def __makeBattleThreeObjects(self):
if self.gavels == None:
self.gavels = []
for index in xrange(self.numGavels):
gavel = DistributedLawbotBossGavelAI.DistributedLawbotBossGavelAI(self.air, self, index)
gavel.generateWithRequired(self.zoneId)
self.gavels.append(gavel)
return
def __deleteBattleThreeObjects(self):
if self.gavels != None:
for gavel in self.gavels:
gavel.request('Off')
gavel.requestDelete()
self.gavels = None
return
def doBattleThreeInfo(self):
didTheyWin = 0
if self.bossDamage == ToontownGlobals.LawbotBossMaxDamage:
didTheyWin = 1
self.battleThreeTimeInMin = globalClock.getFrameTime() - self.battleThreeTimeStarted
self.battleThreeTimeInMin /= 60.0
self.numToonsAtEnd = 0
toonHps = []
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
self.numToonsAtEnd += 1
toonHps.append(toon.hp)
self.air.writeServerEvent('b3Info', self.doId, '%d|%.2f|%d|%d|%d|%d|%d|%d|%d|%d|%d|%d|%s|%s' % (didTheyWin,
self.battleThreeTimeInMin,
self.numToonsAtStart,
self.numToonsAtEnd,
self.numToonJurorsSeated,
self.battleDifficulty,
self.ammoCount,
self.numGavels,
self.numLawyers,
self.toonupValue,
self.numBonusStates,
self.numAreaAttacks,
toonHps,
self.weightPerToon))
def exitBattleThree(self):
self.doBattleThreeInfo()
self.stopAttacks()
self.stopStrafes()
taskName = self.uniqueName('CagedToonSaySomething')
taskMgr.remove(taskName)
self.__resetLawyers()
self.__deleteBattleThreeObjects()
def enterNearVictory(self):
self.resetBattles()
def exitNearVictory(self):
pass
def enterVictory(self):
self.resetBattles()
self.suitsKilled.append({'type': None,
'level': None,
'track': self.dna.dept,
'isSkelecog': 0,
'isForeman': 0,
'isVP': 1,
'isCFO': 0,
'isSupervisor': 0,
'isVirtual': 0,
'activeToons': self.involvedToons[:]})
self.barrier = self.beginBarrier('Victory', self.involvedToons, 30, self.__doneVictory)
return
def __doneVictory(self, avIds):
self.d_setBattleExperience()
self.b_setState('Reward')
BattleExperienceAI.assignRewards(self.involvedToons, self.toonSkillPtsGained, self.suitsKilled, ToontownGlobals.dept2cogHQ(self.dept), self.helpfulToons)
preferredDept = random.randrange(len(SuitDNA.suitDepts))
typeWeights = ['single'] * 70 + ['building'] * 27 + ['invasion'] * 3
preferredSummonType = random.choice(typeWeights)
for toonId in self.involvedToons:
toon = self.air.doId2do.get(toonId)
if toon:
self.giveCogSummonReward(toon, preferredDept, preferredSummonType)
toon.b_promote(self.deptIndex)
def giveCogSummonReward(self, toon, prefDeptIndex, prefSummonType):
cogLevel = int(self.toonLevels / self.maxToonLevels * SuitDNA.suitsPerDept)
cogLevel = min(cogLevel, SuitDNA.suitsPerDept - 1)
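        # The summon level above scales with the average toon promotion level
        # for this dept and is clamped to the top suit tier (toonLevels equal
        # to maxToonLevels would otherwise map one past the end).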
deptIndex = prefDeptIndex
summonType = prefSummonType
hasSummon = toon.hasParticularCogSummons(prefDeptIndex, cogLevel, prefSummonType)
if hasSummon:
self.notify.debug('trying to find another reward')
if not toon.hasParticularCogSummons(prefDeptIndex, cogLevel, 'single'):
summonType = 'single'
elif not toon.hasParticularCogSummons(prefDeptIndex, cogLevel, 'building'):
summonType = 'building'
elif not toon.hasParticularCogSummons(prefDeptIndex, cogLevel, 'invasion'):
summonType = 'invasion'
else:
foundOne = False
for curDeptIndex in xrange(len(SuitDNA.suitDepts)):
if not toon.hasParticularCogSummons(curDeptIndex, cogLevel, prefSummonType):
deptIndex = curDeptIndex
foundOne = True
break
elif not toon.hasParticularCogSummons(curDeptIndex, cogLevel, 'single'):
deptIndex = curDeptIndex
summonType = 'single'
foundOne = True
break
elif not toon.hasParticularCogSummons(curDeptIndex, cogLevel, 'building'):
deptIndex = curDeptIndex
summonType = 'building'
foundOne = True
break
elif not toon.hasParticularCogSummons(curDeptIndex, cogLevel, 'invasion'):
summonType = 'invasion'
deptIndex = curDeptIndex
foundOne = True
break
possibleCogLevel = range(SuitDNA.suitsPerDept)
possibleDeptIndex = range(len(SuitDNA.suitDepts))
possibleSummonType = ['single', 'building', 'invasion']
typeWeights = ['single'] * 70 + ['building'] * 27 + ['invasion'] * 3
if not foundOne:
for i in xrange(5):
randomCogLevel = random.choice(possibleCogLevel)
randomSummonType = random.choice(typeWeights)
randomDeptIndex = random.choice(possibleDeptIndex)
if not toon.hasParticularCogSummons(randomDeptIndex, randomCogLevel, randomSummonType):
foundOne = True
cogLevel = randomCogLevel
summonType = randomSummonType
deptIndex = randomDeptIndex
break
for curType in possibleSummonType:
if foundOne:
break
for curCogLevel in possibleCogLevel:
if foundOne:
break
for curDeptIndex in possibleDeptIndex:
if foundOne:
break
if not toon.hasParticularCogSummons(curDeptIndex, curCogLevel, curType):
foundOne = True
cogLevel = curCogLevel
summonType = curType
deptIndex = curDeptIndex
if not foundOne:
cogLevel = None
summonType = None
deptIndex = None
toon.assignNewCogSummons(cogLevel, summonType, deptIndex)
return
def exitVictory(self):
self.takeAwayPies()
def enterDefeat(self):
self.resetBattles()
self.barrier = self.beginBarrier('Defeat', self.involvedToons, 10, self.__doneDefeat)
def __doneDefeat(self, avIds):
for toonId in self.involvedToons:
toon = self.air.doId2do.get(toonId)
if toon:
toon.b_setHp(0)
def exitDefeat(self):
self.takeAwayPies()
def enterFrolic(self):
DistributedBossCogAI.DistributedBossCogAI.enterFrolic(self)
self.b_setBossDamage(0, 0, 0)
def setPieType(self):
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.d_setPieType(ToontownBattleGlobals.MAX_TRACK_INDEX + 1)
def takeAwayPies(self):
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.b_setNumPies(0)
def __recordHit(self):
now = globalClock.getFrameTime()
self.hitCount += 1
if self.hitCount < self.limitHitCount or self.bossDamage < self.hitCountDamage:
return
def __resetLawyers(self):
for suit in self.lawyers:
suit.requestDelete()
self.lawyers = []
def __makeLawyers(self):
self.__resetLawyers()
lawCogChoices = ['b',
'dt',
'ac',
'bs',
'sd',
'le',
'bw']
for i in xrange(self.numLawyers):
suit = DistributedLawbotBossSuitAI.DistributedLawbotBossSuitAI(self.air, None)
suit.dna = SuitDNA.SuitDNA()
lawCog = random.choice(lawCogChoices)
suit.dna.newSuit(lawCog)
suit.setPosHpr(*ToontownGlobals.LawbotBossLawyerPosHprs[i])
suit.setBoss(self)
suit.generateWithRequired(self.zoneId)
self.lawyers.append(suit)
self.__sendLawyerIds()
return
def hitChair(self, chairIndex, npcToonIndex):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId in self.involvedToons, 'hitChair from unknown avatar'):
return
if not self.chairs:
return
if chairIndex < 0 or chairIndex >= len(self.chairs):
self.notify.warning('invalid chairIndex = %d' % chairIndex)
return
        if self.state != 'BattleTwo':
return
self.chairs[chairIndex].b_setToonJurorIndex(npcToonIndex)
self.chairs[chairIndex].requestToonJuror()
def clearBonus(self, taskName):
if self and hasattr(self, 'bonusState'):
self.bonusState = False
def startBonusState(self):
self.notify.debug('startBonusState')
self.bonusTimeStarted = globalClock.getFrameTime()
self.bonusState = True
self.numBonusStates += 1
for toonId in self.involvedToons:
toon = self.air.doId2do.get(toonId)
if toon:
self.healToon(toon, ToontownGlobals.LawbotBossBonusToonup)
taskMgr.doMethodLater(ToontownGlobals.LawbotBossBonusDuration, self.clearBonus, self.uniqueName('clearBonus'))
self.sendUpdate('enteredBonusState', [])
def areAllLawyersStunned(self):
for lawyer in self.lawyers:
if not lawyer.stunned:
return False
return True
def checkForBonusState(self):
if self.bonusState:
return
if not self.areAllLawyersStunned():
return
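        # All lawyers are stunned: start the bonus round once
        # LawbotBossBonusWaitTime seconds have passed since the previous one started.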
curTime = globalClock.getFrameTime()
delta = curTime - self.bonusTimeStarted
if ToontownGlobals.LawbotBossBonusWaitTime < delta:
self.startBonusState()
def toonEnteredCannon(self, toonId, cannonIndex):
self.cannonIndexPerToon[toonId] = cannonIndex
def numJurorsSeatedByCannon(self, cannonIndex):
retVal = 0
for chair in self.chairs:
if chair.state == 'ToonJuror':
if chair.toonJurorIndex == cannonIndex:
retVal += 1
return retVal
def calculateWeightPerToon(self):
for toonId in self.involvedToons:
defaultWeight = 1
bonusWeight = 0
cannonIndex = self.cannonIndexPerToon.get(toonId)
            if cannonIndex is not None:
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
if diffSettings[4]:
bonusWeight = self.numJurorsSeatedByCannon(cannonIndex) - diffSettings[5]
if bonusWeight < 0:
bonusWeight = 0
newWeight = defaultWeight + bonusWeight
self.weightPerToon[toonId] = newWeight
self.notify.debug('toon %d has weight of %d' % (toonId, newWeight))
return
def b_setBattleDifficulty(self, batDiff):
self.setBattleDifficulty(batDiff)
self.d_setBattleDifficulty(batDiff)
def setBattleDifficulty(self, batDiff):
self.battleDifficulty = batDiff
def d_setBattleDifficulty(self, batDiff):
self.sendUpdate('setBattleDifficulty', [batDiff])
def calcAndSetBattleDifficulty(self):
self.toonLevels = self.getToonDifficulty()
numDifficultyLevels = len(ToontownGlobals.LawbotBossDifficultySettings)
battleDifficulty = int(self.toonLevels / self.maxToonLevels * numDifficultyLevels)
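        # The difficulty index grows linearly with the average toon level and
        # is clamped to the last valid entry of LawbotBossDifficultySettings.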
if battleDifficulty >= numDifficultyLevels:
battleDifficulty = numDifficultyLevels - 1
self.b_setBattleDifficulty(battleDifficulty)
|
|
import os
import time
from tools.assertions import assert_all, assert_none, assert_one
from dtest import Tester, debug
from sstable_generation_loading_test import BaseSStableLoaderTest
from tools.decorators import since
from tools.misc import new_node
LEGACY_SSTABLES_JVM_ARGS = ["-Dcassandra.streamdes.initial_mem_buffer_size=1",
"-Dcassandra.streamdes.max_mem_buffer_size=5",
"-Dcassandra.streamdes.max_spill_file_size=128"]
@since('3.0')
class TestStorageEngineUpgrade(Tester):
def setUp(self, bootstrap=False, jvm_args=None):
super(TestStorageEngineUpgrade, self).setUp()
self.default_install_dir = self.cluster.get_install_dir()
self.bootstrap = bootstrap
if jvm_args is None:
jvm_args = []
self.jvm_args = jvm_args
def _setup_cluster(self, create_keyspace=True):
cluster = self.cluster
# Forcing cluster version on purpose
cluster.set_install_dir(version="2.1.9")
cluster.populate(1).start()
node1 = cluster.nodelist()[0]
cursor = self.patient_cql_connection(node1)
if create_keyspace:
cursor.execute("CREATE KEYSPACE ks WITH replication = {'class':'SimpleStrategy', 'replication_factor': 1};")
cursor.execute('USE ks')
return cursor
def _do_upgrade(self, login_keyspace=True):
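        # Upgrade flow: flush the legacy sstables, stop the node, point it at
        # the current install dir and restart it; when self.bootstrap is set,
        # also bootstrap a fresh node so the legacy sstables get streamed to it.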
cluster = self.cluster
node1 = cluster.nodelist()[0]
node1.flush()
time.sleep(.5)
node1.stop(wait_other_notice=True)
node1.set_install_dir(install_dir=self.default_install_dir)
node1.start(wait_other_notice=True, wait_for_binary_proto=True)
if self.bootstrap:
cluster.set_install_dir(install_dir=self.default_install_dir)
# Add a new node, bootstrap=True ensures that it is not a seed
node2 = new_node(cluster, bootstrap=True)
node2.start(wait_for_binary_proto=True, jvm_args=self.jvm_args)
temp_files = self.glob_data_dirs(os.path.join('*', "tmp", "*.dat"))
debug("temp files: " + str(temp_files))
self.assertEquals(0, len(temp_files), "Temporary files were not cleaned up.")
cursor = self.patient_cql_connection(node1)
if login_keyspace:
cursor.execute('USE ks')
return cursor
def update_and_drop_column_test(self):
"""
Checks that dropped columns are properly handled in legacy sstables
@jira_ticket CASSANDRA-11018
"""
cursor = self._setup_cluster()
cursor.execute('CREATE TABLE t (k text PRIMARY KEY, a int, b int)')
cursor.execute("INSERT INTO t(k, a, b) VALUES ('some_key', 0, 0)")
cursor = self._do_upgrade()
cursor.execute("ALTER TABLE t DROP b")
self.cluster.compact()
assert_one(cursor, "SELECT * FROM t", ['some_key', 0])
def upgrade_with_clustered_CQL_table_test(self):
"""
Validates we can do basic slice queries (forward and reverse ones) on legacy sstables for a CQL table
with a clustering column.
"""
self.upgrade_with_clustered_table()
def upgrade_with_clustered_compact_table_test(self):
"""
Validates we can do basic slice queries (forward and reverse ones) on legacy sstables for a COMPACT table
with a clustering column.
"""
self.upgrade_with_clustered_table(compact_storage=True)
def upgrade_with_unclustered_CQL_table_test(self):
"""
Validates we can do basic name queries on legacy sstables for a CQL table without clustering.
"""
self.upgrade_with_unclustered_table()
def upgrade_with_unclustered_compact_table_test(self):
"""
Validates we can do basic name queries on legacy sstables for a COMPACT table without clustering.
"""
self.upgrade_with_unclustered_table(compact_storage=True)
def upgrade_with_clustered_table(self, compact_storage=False):
PARTITIONS = 2
ROWS = 1000
session = self._setup_cluster()
session.execute(
'CREATE TABLE t (k int, t int, v int, PRIMARY KEY (k, t))' +
(' WITH COMPACT STORAGE' if compact_storage else ''))
for n in range(PARTITIONS):
for r in range(ROWS):
session.execute("INSERT INTO t(k, t, v) VALUES ({n}, {r}, {r})".format(n=n, r=r))
session = self._do_upgrade()
for n in range(PARTITIONS):
assert_all(session,
"SELECT * FROM t WHERE k = {}".format(n),
[[n, v, v] for v in range(ROWS)])
assert_all(session,
"SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
[[n, v, v] for v in range(ROWS - 1, -1, -1)])
# Querying a "large" slice
start = ROWS / 10
end = ROWS - 1 - (ROWS / 10)
assert_all(session,
"SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end}".format(n=n, start=start, end=end),
[[n, v, v] for v in range(start, end)])
assert_all(session,
"SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end} ORDER BY t DESC".format(n=n, start=start, end=end),
[[n, v, v] for v in range(end - 1, start - 1, -1)])
# Querying a "small" slice
start = ROWS / 2
end = ROWS / 2 + 5
assert_all(session,
"SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end}".format(n=n, start=start, end=end),
[[n, v, v] for v in range(start, end)])
assert_all(session,
"SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end} ORDER BY t DESC".format(n=n, start=start, end=end),
[[n, v, v] for v in range(end - 1, start - 1, -1)])
self.cluster.compact()
for n in range(PARTITIONS):
assert_all(session, "SELECT * FROM t WHERE k = {}".format(n), [[n, v, v] for v in range(ROWS)])
assert_all(session,
"SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
[[n, v, v] for v in range(ROWS - 1, -1, -1)])
# Querying a "large" slice
start = ROWS / 10
end = ROWS - 1 - (ROWS / 10)
assert_all(session,
"SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end}".format(n=n, start=start, end=end),
[[n, v, v] for v in range(start, end)])
assert_all(session,
"SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end} ORDER BY t DESC".format(n=n, start=start, end=end),
[[n, v, v] for v in range(end - 1, start - 1, -1)])
# Querying a "small" slice
start = ROWS / 2
end = ROWS / 2 + 5
assert_all(session,
"SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end}".format(n=n, start=start, end=end),
[[n, v, v] for v in range(start, end)])
assert_all(session,
"SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end} ORDER BY t DESC".format(n=n, start=start, end=end),
[[n, v, v] for v in range(end - 1, start - 1, -1)])
def upgrade_with_unclustered_table(self, compact_storage=False):
PARTITIONS = 5
session = self._setup_cluster()
session.execute('CREATE TABLE t (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)' +
(' WITH COMPACT STORAGE' if compact_storage else ''))
for n in range(PARTITIONS):
session.execute("INSERT INTO t(k, v1, v2, v3, v4) VALUES ({}, {}, {}, {}, {})".format(n, n + 1, n + 2, n + 3, n + 4))
session = self._do_upgrade()
for n in range(PARTITIONS):
assert_one(session, "SELECT * FROM t WHERE k = {}".format(n), [n, n + 1, n + 2, n + 3, n + 4])
self.cluster.compact()
for n in range(PARTITIONS):
assert_one(session, "SELECT * FROM t WHERE k = {}".format(n), [n, n + 1, n + 2, n + 3, n + 4])
def upgrade_with_statics_test(self):
"""
Validates we can read legacy sstables with static columns.
"""
PARTITIONS = 1
ROWS = 10
session = self._setup_cluster()
session.execute('CREATE TABLE t (k int, s1 int static, s2 int static, t int, v1 int, v2 int, PRIMARY KEY (k, t))')
for n in range(PARTITIONS):
for r in range(ROWS):
session.execute("INSERT INTO t(k, s1, s2, t, v1, v2) VALUES ({}, {}, {}, {}, {}, {})".format(n, r, r + 1, r, r, r + 1))
session = self._do_upgrade()
for n in range(PARTITIONS):
assert_all(session,
"SELECT * FROM t WHERE k = {}".format(n),
[[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS)])
assert_all(session,
"SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
[[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS - 1, -1, -1)])
self.cluster.compact()
for n in range(PARTITIONS):
assert_all(session,
"SELECT * FROM t WHERE k = {}".format(n),
[[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS)])
assert_all(session,
"SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
[[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS - 1, -1, -1)])
def upgrade_with_wide_partition_test(self):
"""
        Checks we can read old indexed sstables by creating large partitions (larger than the index block used by sstables).
"""
self.upgrade_with_wide_partition()
def upgrade_with_wide_partition_reversed_test(self):
"""
        Checks we can read old indexed sstables by creating large partitions (larger than the index block used by sstables). This test
        validates reverse queries.
"""
self.upgrade_with_wide_partition(query_modifier=" ORDER BY t DESC")
def upgrade_with_wide_partition(self, query_modifier=""):
ROWS = 100
session = self._setup_cluster()
session.execute('CREATE TABLE t (k int, t int, v1 int, v2 blob, v3 set<int>, PRIMARY KEY (k, t))')
        # the blob is only here to make the row bigger internally so it sometimes spans multiple index blocks
bigish_blob = "0x"
for i in range(1000):
bigish_blob = bigish_blob + "0000"
for r in range(ROWS):
session.execute("INSERT INTO t(k, t, v1, v2, v3) VALUES ({}, {}, {}, {}, {{{}, {}}})".format(0, r, r, bigish_blob, r * 2, r * 3))
self.cluster.flush()
# delete every other row
for r in range(0, ROWS, 2):
session.execute("DELETE FROM t WHERE k=0 AND t={}".format(r))
# delete the set from every other remaining row
for r in range(1, ROWS, 4):
session.execute("UPDATE t SET v3={{}} WHERE k=0 AND t={}".format(r))
session = self._do_upgrade()
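        # Expected state after the mutations above: even rows were deleted,
        # rows 1, 5, 9, ... had their set nulled out, and the remaining odd
        # rows still hold {2r, 3r}.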
for r in range(0, ROWS):
query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(r, query_modifier)
if (r - 1) % 4 == 0:
assert_one(session, query, [r, r, None])
elif (r + 1) % 2 == 0:
assert_one(session, query, [r, r, set([r * 2, r * 3])])
else:
assert_none(session, query)
self.cluster.compact()
for r in range(ROWS):
query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(r, query_modifier)
if (r - 1) % 4 == 0:
assert_one(session, query, [r, r, None])
elif (r + 1) % 2 == 0:
assert_one(session, query, [r, r, set([r * 2, r * 3])])
else:
assert_none(session, query)
def upgrade_with_index_test(self):
"""
Checks a simple index can still be read after upgrade.
"""
PARTITIONS = 2
ROWS = 4
session = self._setup_cluster()
session.execute('CREATE TABLE t (k int, t int, v1 int, v2 int, PRIMARY KEY (k, t))')
session.execute('CREATE INDEX ON t(v1)')
for p in range(PARTITIONS):
for r in range(ROWS):
session.execute("INSERT INTO t(k, t, v1, v2) VALUES ({}, {}, {}, {})".format(p, r, r % 2, r * 2))
self.cluster.flush()
assert_all(session,
"SELECT * FROM t WHERE v1 = 0",
[[p, r, 0, r * 2] for p in range(PARTITIONS) for r in range(ROWS) if r % 2 == 0],
ignore_order=True)
session = self._do_upgrade()
assert_all(session,
"SELECT * FROM t WHERE v1 = 0",
[[p, r, 0, r * 2] for p in range(PARTITIONS) for r in range(ROWS) if r % 2 == 0],
ignore_order=True)
self.cluster.compact()
assert_all(session,
"SELECT * FROM t WHERE v1 = 0",
[[p, r, 0, r * 2] for p in range(PARTITIONS) for r in range(ROWS) if r % 2 == 0],
ignore_order=True)
def upgrade_with_range_tombstones_test(self):
"""
        Checks an sstable including a range tombstone can be read after upgrade.
@jira_ticket CASSANDRA-10360
"""
ROWS = 100
session = self._setup_cluster()
session.execute('CREATE TABLE t (k int, t1 int, t2 int, PRIMARY KEY (k, t1, t2))')
for n in range(ROWS):
session.execute("INSERT INTO t(k, t1, t2) VALUES (0, 0, {})".format(n))
session.execute("DELETE FROM t WHERE k=0 AND t1=0")
for n in range(0, ROWS, 2):
session.execute("INSERT INTO t(k, t1, t2) VALUES (0, 0, {})".format(n))
session = self._do_upgrade()
assert_all(session, "SELECT * FROM t WHERE k = 0", [[0, 0, n] for n in range(0, ROWS, 2)])
self.cluster.compact()
def upgrade_with_range_and_collection_tombstones_test(self):
"""
        Checks an sstable including a collection tombstone (inserted through adding a collection) can be read after upgrade.
@jira_ticket CASSANDRA-10743
"""
session = self._setup_cluster()
session.execute('CREATE TABLE t (k text, t int, c list<int>, PRIMARY KEY (k, t))')
session.execute("INSERT INTO t(k, t, c) VALUES ('some_key', 0, %s)" % str([i for i in range(10000)]))
session = self._do_upgrade()
self.cluster.compact()
assert_one(session, "SELECT k FROM t", ['some_key'])
@since('3.0')
class TestBootstrapAfterUpgrade(TestStorageEngineUpgrade):
def setUp(self):
super(TestBootstrapAfterUpgrade, self).setUp(bootstrap=True, jvm_args=LEGACY_SSTABLES_JVM_ARGS)
@since('3.0')
class TestLoadKaSStables(BaseSStableLoaderTest):
__test__ = True
upgrade_from = '2.1.6'
jvm_args = LEGACY_SSTABLES_JVM_ARGS
@since('3.0')
class TestLoadKaCompactSStables(BaseSStableLoaderTest):
__test__ = True
upgrade_from = '2.1.6'
jvm_args = LEGACY_SSTABLES_JVM_ARGS
compact = True
@since('3.0')
class TestLoadLaSStables(BaseSStableLoaderTest):
__test__ = True
upgrade_from = '2.2.4'
jvm_args = LEGACY_SSTABLES_JVM_ARGS
@since('3.0')
class TestLoadLaCompactSStables(BaseSStableLoaderTest):
__test__ = True
upgrade_from = '2.2.4'
jvm_args = LEGACY_SSTABLES_JVM_ARGS
compact = True
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Taobao Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for Sheepdog storage system"""
import hashlib
from oslo.config import cfg
from glance.common import exception
from glance.openstack.common import excutils
import glance.openstack.common.log as logging
from glance.openstack.common import processutils
from glance.openstack.common import uuidutils
import glance.store
import glance.store.base
import glance.store.location
LOG = logging.getLogger(__name__)
DEFAULT_ADDR = '127.0.0.1'
DEFAULT_PORT = '7000'
DEFAULT_CHUNKSIZE = 64 # in MiB
LOG = logging.getLogger(__name__)
sheepdog_opts = [
cfg.IntOpt('sheepdog_store_chunk_size', default=DEFAULT_CHUNKSIZE,
help=_('Images will be chunked into objects of this size '
'(in megabytes). For best performance, this should be '
'a power of two.')),
cfg.StrOpt('sheepdog_store_port', default=DEFAULT_PORT,
help=_('Port of sheep daemon.')),
cfg.StrOpt('sheepdog_store_address', default=DEFAULT_ADDR,
help=_('IP address of sheep daemon.'))
]
CONF = cfg.CONF
CONF.register_opts(sheepdog_opts)
class SheepdogImage:
"""Class describing an image stored in Sheepdog storage."""
def __init__(self, addr, port, name, chunk_size):
self.addr = addr
self.port = port
self.name = name
self.chunk_size = chunk_size
def _run_command(self, command, data, *params):
cmd = ["collie", "vdi"]
cmd.extend(command)
cmd.extend(["-a", self.addr, "-p", self.port, self.name])
cmd.extend(params)
try:
return processutils.execute(*cmd, process_input=data)[0]
except (processutils.ProcessExecutionError, OSError) as exc:
LOG.error(exc)
raise glance.store.BackendException(exc)
def get_size(self):
"""
        Return the size of this image.
Sheepdog Usage: collie vdi list -r -a address -p port image
"""
out = self._run_command(["list", "-r"], None)
return long(out.split(' ')[3])
def read(self, offset, count):
"""
Read up to 'count' bytes from this image starting at 'offset' and
return the data.
Sheepdog Usage: collie vdi read -a address -p port image offset len
"""
return self._run_command(["read"], None, str(offset), str(count))
def write(self, data, offset, count):
"""
Write up to 'count' bytes from the data to this image starting at
'offset'
Sheepdog Usage: collie vdi write -a address -p port image offset len
"""
self._run_command(["write"], data, str(offset), str(count))
def create(self, size):
"""
Create this image in the Sheepdog cluster with size 'size'.
Sheepdog Usage: collie vdi create -a address -p port image size
"""
self._run_command(["create"], None, str(size))
def delete(self):
"""
Delete this image in the Sheepdog cluster
Sheepdog Usage: collie vdi delete -a address -p port image
"""
self._run_command(["delete"], None)
def exist(self):
"""
Check if this image exists in the Sheepdog cluster via 'list' command
Sheepdog Usage: collie vdi list -r -a address -p port image
"""
out = self._run_command(["list", "-r"], None)
if not out:
return False
else:
return True
class StoreLocation(glance.store.location.StoreLocation):
"""
Class describing a Sheepdog URI. This is of the form:
sheepdog://image-id
"""
def process_specs(self):
self.image = self.specs.get('image')
def get_uri(self):
return "sheepdog://%s" % self.image
def parse_uri(self, uri):
valid_schema = 'sheepdog://'
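        # Accepted form: 'sheepdog://<image uuid>'; anything after the scheme
        # that does not look like a UUID is rejected below.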
if not uri.startswith(valid_schema):
raise exception.BadStoreUri(_("URI must start with %s://") %
valid_schema)
self.image = uri[len(valid_schema):]
if not uuidutils.is_uuid_like(self.image):
raise exception.BadStoreUri(_("URI must contains well-formated "
"image id"))
class ImageIterator(object):
"""
    Reads data from a Sheepdog image, one chunk at a time.
"""
def __init__(self, image):
self.image = image
def __iter__(self):
image = self.image
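        # Stream the image in chunk_size pieces: 'left' tracks the bytes still
        # to read and the current read offset is (total - left).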
total = left = image.get_size()
while left > 0:
length = min(image.chunk_size, left)
data = image.read(total - left, length)
left -= len(data)
yield data
raise StopIteration()
class Store(glance.store.base.Store):
"""Sheepdog backend adapter."""
EXAMPLE_URL = "sheepdog://image"
def get_schemes(self):
return ('sheepdog',)
def configure_add(self):
"""
Configure the Store to use the stored configuration options
Any store that needs special configuration should implement
this method. If the store was not able to successfully configure
itself, it should raise `exception.BadStoreConfiguration`
"""
try:
self.chunk_size = CONF.sheepdog_store_chunk_size * 1024 * 1024
self.addr = CONF.sheepdog_store_address.strip()
self.port = CONF.sheepdog_store_port
except cfg.ConfigFileValueError as e:
reason = _("Error in store configuration: %s") % e
LOG.error(reason)
raise exception.BadStoreConfiguration(store_name='sheepdog',
reason=reason)
if ' ' in self.addr:
reason = (_("Invalid address configuration of sheepdog store: %s")
% self.addr)
LOG.error(reason)
raise exception.BadStoreConfiguration(store_name='sheepdog',
reason=reason)
try:
cmd = ["collie", "vdi", "list", "-a", self.addr, "-p", self.port]
processutils.execute(*cmd)
except Exception as e:
reason = _("Error in store configuration: %s") % e
LOG.error(reason)
raise exception.BadStoreConfiguration(store_name='sheepdog',
reason=reason)
def get(self, location):
"""
Takes a `glance.store.location.Location` object that indicates
where to find the image file, and returns a generator for reading
the image file
:param location `glance.store.location.Location` object, supplied
from glance.store.location.get_location_from_uri()
:raises `glance.exception.NotFound` if image does not exist
"""
loc = location.store_location
image = SheepdogImage(self.addr, self.port, loc.image,
self.chunk_size)
if not image.exist():
raise exception.NotFound(_("Sheepdog image %s does not exist")
% image.name)
return (ImageIterator(image), image.get_size())
def get_size(self, location):
"""
Takes a `glance.store.location.Location` object that indicates
where to find the image file and returns the image size
:param location `glance.store.location.Location` object, supplied
from glance.store.location.get_location_from_uri()
:raises `glance.exception.NotFound` if image does not exist
:rtype int
"""
loc = location.store_location
image = SheepdogImage(self.addr, self.port, loc.image,
self.chunk_size)
if not image.exist():
raise exception.NotFound(_("Sheepdog image %s does not exist")
% image.name)
return image.get_size()
def add(self, image_id, image_file, image_size):
"""
Stores an image file with supplied identifier to the backend
storage system and returns a tuple containing information
about the stored image.
:param image_id: The opaque image identifier
:param image_file: The image data to write, as a file-like object
:param image_size: The size of the image data to write, in bytes
:retval tuple of URL in backing store, bytes written, and checksum
:raises `glance.common.exception.Duplicate` if the image already
existed
"""
image = SheepdogImage(self.addr, self.port, image_id,
self.chunk_size)
if image.exist():
raise exception.Duplicate(_("Sheepdog image %s already exists")
% image_id)
location = StoreLocation({'image': image_id})
checksum = hashlib.md5()
image.create(image_size)
try:
total = left = image_size
while left > 0:
length = min(self.chunk_size, left)
data = image_file.read(length)
image.write(data, total - left, length)
left -= length
checksum.update(data)
except:
# Note(zhiyan): clean up already received data when
# error occurs such as ImageSizeLimitExceeded exception.
with excutils.save_and_reraise_exception():
image.delete()
return (location.get_uri(), image_size, checksum.hexdigest(), {})
def delete(self, location):
"""
Takes a `glance.store.location.Location` object that indicates
where to find the image file to delete
:location `glance.store.location.Location` object, supplied
from glance.store.location.get_location_from_uri()
:raises NotFound if image does not exist
"""
loc = location.store_location
image = SheepdogImage(self.addr, self.port, loc.image,
self.chunk_size)
if not image.exist():
raise exception.NotFound(_("Sheepdog image %s does not exist") %
loc.image)
image.delete()
|
|
#!/usr/bin/env python2.7
"""Eject multiple tapes from a Dell PowerVault 124T autoloader in one go.
No way to do this over SCSI. Instead this works over HTTP through the
autoloader's web interface. From: https://github.com/Robpol86/tape_bulk_eject.
Supply credentials through ~/.pv124t.json:
{
"host": "192.168.0.50",
"user": "admin",
"pass": "super_secret_password"
}
"""
from __future__ import print_function
import argparse
import base64
import HTMLParser
import json
import logging
import os
import re
import signal
import sys
import time
import urllib2
__author__ = '@Robpol86'
__license__ = 'MIT'
class HandledError(Exception):
"""Raised on a handled error Causes exit code 1.
Any other exception raised is considered a bug.
"""
pass
class AutoloaderError(Exception):
"""Raised when the autoloader replies with HTTP 401.
The stupid thing uses 401 for everything. Invalid credentials, locked drive, bad request data,
and even rate limiting.
"""
pass
class InfoFilter(logging.Filter):
"""Filter out non-info and non-debug logging statements.
From: https://stackoverflow.com/questions/16061641/python-logging-split/16066513#16066513
"""
def filter(self, record):
"""Filter method.
:param record: Log record object.
:return: Keep or ignore this record.
:rtype: bool
"""
return record.levelno <= logging.INFO
class TapePos(HTMLParser.HTMLParser):
"""Parses commands.html and gets current tape positions.
:cvar RE_ONCLICK: <img /> onclick attribute parser (e.g. onClick="from_to(mailslot)").
:ivar bool in_center_tag: If parser is within <center /> on the page. There's only one.
:ivar dict inventory: Populates this with the current inventory parsed from HTML.
"""
RE_ONCLICK = re.compile(r'from_to\((?:slot(\d+)|(drive|picker|mailslot))\)')
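    # e.g. 'from_to(slot3)' -> groups ('3', None), while 'from_to(mailslot)'
    # -> groups (None, 'mailslot'); update_slot() keeps the non-empty group.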
def __init__(self, inventory):
"""Constructor."""
HTMLParser.HTMLParser.__init__(self)
self.in_center_tag = False
self.inventory = inventory
def handle_starttag(self, tag, attrs):
"""Called on all starting tags.
:param str tag: Current HTML tag (e.g. 'center' or 'img').
:param list attrs: List of attributes (key value pairs) on this HTML tag.
"""
if tag == 'center':
self.in_center_tag = True
elif tag == 'img' and self.in_center_tag:
attributes = dict(attrs)
if 'onclick' in attributes and 'title' in attributes:
self.update_slot(attributes)
def handle_endtag(self, tag):
"""Called on all ending/closing tags.
:param str tag: Current HTML tag (e.g. 'center' or 'img').
"""
if tag == 'center':
self.in_center_tag = False
def update_slot(self, attrs):
"""Update self.inventory with current state of a single slot.
:raise HandledError: On handled errors. Logs before raising. Program should exit.
:param dict attrs: Attributes of <img /> tag representing a slot.
"""
logger = logging.getLogger('TapePos.update_slot')
logger.debug('img tag attributes: %s', str(attrs))
onclick, tape = attrs['onclick'], attrs['title']
if tape == 'Empty':
return
try:
slot = [i for i in self.RE_ONCLICK.match(onclick).groups() if i][0]
except AttributeError:
logger.error('Attribute "onclick" in img tag is invalid: %s', onclick)
raise HandledError
self.inventory[tape] = slot
class Autoloader(object):
"""Interfaces with the autoloader over its HTTP web interface.
:cvar int DELAY: Number of seconds to wait between queries. The web interface is very fragile.
:cvar int DELAY_ERROR: Number of seconds to wait if we get an error from the autoloader.
:ivar str auth: HTTP basic authentication credentials (base64 encoded).
    :ivar dict inventory: Tape positions. 16 slots plus drive (17), mailslot (18), and picker (19).
:ivar float last_access: Unix time of last HTTP query.
:ivar str url: URL prefix of the autoloader (e.g. 'http://192.168.0.50/').
"""
DELAY = 10
DELAY_ERROR = 15
def __init__(self, host_name, user_name, pass_word):
"""Constructor.
:param str host_name: Hostname or IP address of the autoloader.
:param str user_name: HTTP username (e.g. 'admin').
:param str pass_word: HTTP password.
"""
self.auth = base64.standard_b64encode(':'.join((user_name, pass_word)))
self.inventory = dict()
self.last_access = time.time() - self.DELAY
self.url = 'http://{}/'.format(host_name)
def _query(self, request, no_delay=False):
"""Query the autoloader's web interface. Enforces delay timer.
:raise HandledError: On handled errors. Logs before raising. Program should exit.
:raise AutoloaderError: On HTTP 401 errors when querying the web interface.
:param urllib2.Request request: urllib2.Request instance with data/headers already added.
:param bool no_delay: Exclude this query from sleeping and updating last_access.
:return: HTML response payload.
:rtype: str
"""
logger = logging.getLogger('Autoloader._query')
if not no_delay:
sleep_for = max(0, self.DELAY - (time.time() - self.last_access))
if sleep_for:
logger.debug('Sleeping for %d second(s).', sleep_for)
time.sleep(sleep_for)
logger.debug('Done sleeping.')
self.last_access = time.time()
logger.debug('Set last_access to %f', self.last_access)
# Send request and get response.
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError as exc:
url = request.get_full_url()
if exc.code == 404:
logger.error('404 Not Found on: %s', url)
elif exc.code == 401:
logger.debug('401 Unauthorized on: %s', url)
raise AutoloaderError
else:
logger.error('%s returned HTTP %s instead of 200.', url, exc.code)
raise HandledError
except urllib2.URLError as exc:
url = request.get_full_url()
logger.error('URL "%s" is invalid: %s', url, str(exc))
raise HandledError
html = response.read(102400)
logger.debug('Got HTML from autoloader: %s', html)
return html
def check_creds(self):
"""Check credentials by going to config_opts.html. Doesn't change anything.
:raise HandledError: On handled errors. Logs before raising. Program should exit.
"""
logger = logging.getLogger('Autoloader.auth')
request = urllib2.Request(self.url + 'config_ops.html')
request.headers['Authorization'] = 'Basic {}'.format(self.auth)
try:
self._query(request, no_delay=True)
except AutoloaderError:
message = '%s 401 Unauthorized. Possibly rate limiting or invalid credentials.'
logger.error(message, request.get_full_url())
raise HandledError
def eject(self, tape):
"""Perform tape move to the mailslot thereby "ejecting" it.
Blocks during entire move operation. Once done self.inventory is updated.
:param str tape: The tape to eject.
"""
logger = logging.getLogger('Autoloader.eject')
slot = self.inventory[tape]
slot = int(dict(drive=17, mailslot=18, picker=19).get(slot, slot))
data = 'from={}&to=18&submit=submit'.format(slot)
logger.debug('Eject POST data: %s', data)
request = urllib2.Request(self.url + 'move.cgi', data)
request.headers['Authorization'] = 'Basic {}'.format(self.auth)
request.headers['Content-type'] = 'application/x-www-form-urlencoded'
request.headers['Origin'] = self.url.rstrip('/')
request.headers['Referer'] = self.url + 'commands.html'
# Eject tape.
while True:
try:
html = self._query(request)
except AutoloaderError:
logger.warning('Error while ejecting. Retrying in %s seconds...', self.DELAY_ERROR)
time.sleep(self.DELAY_ERROR)
continue
self.update_inventory(html)
if tape not in self.inventory or self.inventory[tape] == 'mailslot':
break
if self.inventory[tape] == 'drive':
logger.warning('Failed, drive locked? Retrying in %s seconds...', self.DELAY_ERROR)
else:
logger.warning('Tape did not move. Retrying in %s seconds...', self.DELAY_ERROR)
time.sleep(self.DELAY_ERROR)
def update_inventory(self, html=None):
"""Get current tape positions in the autoloader and updates self.inventory.
:raise HandledError: On handled errors. Logs before raising. Program should exit.
:param str html: Parse this html if set. Otherwise requests HTML from autoloader.
"""
logger = logging.getLogger('Autoloader.update_inventory')
if not html:
request = urllib2.Request(self.url + 'commands.html')
html = self._query(request)
if not TapePos.RE_ONCLICK.search(html):
logger.error('Invalid HTML, found no regex matches.')
raise HandledError
parser = TapePos(self.inventory)
self.inventory.clear()
parser.feed(html)
logger.debug('Loaded tapes: %s', '|'.join(sorted(self.inventory)))
def get_arguments(argv=None):
"""Get command line arguments.
:param list argv: Command line argument list to process.
:return: Argparse Namespace object.
"""
program = os.path.basename(__file__).replace('.pyc', '.py')
parser = argparse.ArgumentParser(prog=program, description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true', help='print debug messages')
parser.add_argument('tapes', nargs='+', metavar='TAPE', type=str,
help='list of tapes, space or | delimited.')
return parser.parse_args(args=argv if argv is not None else sys.argv[1:])
def setup_logging(arguments, logger=None):
"""Setup console logging. Info and below go to stdout, others go to stderr.
:param arguments: Argparse Namespace object from get_arguments().
:param str logger: Which logger to set handlers to. Used for testing.
:return: Same Argparse Namespace object in arguments.
"""
verbose = arguments.verbose
format_ = '%(asctime)s %(levelname)-8s %(name)-20s %(message)s' if verbose else '%(message)s'
level = logging.DEBUG if verbose else logging.INFO
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setFormatter(logging.Formatter(format_))
handler_stdout.setLevel(logging.DEBUG)
handler_stdout.addFilter(InfoFilter())
handler_stderr = logging.StreamHandler(sys.stderr)
handler_stderr.setFormatter(logging.Formatter(format_))
handler_stderr.setLevel(logging.WARNING)
root_logger = logging.getLogger(logger)
root_logger.setLevel(level)
root_logger.addHandler(handler_stdout)
root_logger.addHandler(handler_stderr)
return arguments
def combine_config(arguments):
"""Read configuration file and validate command line arguments.
:raise HandledError: On handled errors. Logs before raising. Program should exit.
:param arguments: Argparse Namespace object from get_arguments().
:return: User input data.
:rtype: dict
"""
logger = logging.getLogger('combine_config')
# Get list of tapes from arguments.
logger.debug('Reading arguments.tapes: %s', str(arguments.tapes))
tapes = sorted(set('|'.join(arguments.tapes).replace(' ', '|').strip().strip('|').split('|')))
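    # For example (hypothetical input): arguments.tapes == ['A00001 A00002', 'A00003|A00004']
    # joins to 'A00001 A00002|A00003|A00004', normalizes spaces to '|', and yields the
    # sorted, de-duplicated list ['A00001', 'A00002', 'A00003', 'A00004'].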
if not tapes or not all(tapes):
logger.error('No tapes specified.')
raise HandledError
logger.debug('Got: %s', str(tapes))
# Read config file.
json_file = os.path.join(os.path.expanduser('~'), '.pv124t.json')
logger.debug('Reading: %s', json_file)
try:
with open(json_file) as handle:
json_file_data = handle.read(1024)
except IOError as exc:
logger.error('Failed to read %s: %s', json_file, str(exc))
raise HandledError
logger.debug('Got: %s', json_file_data)
# Parse config file json.
try:
json_parsed = json.loads(json_file_data)
except ValueError as exc:
logger.error('Failed to parse json in %s: %s', json_file, exc.message)
raise HandledError
logger.debug('Got: %s', str(json_parsed))
# Read values from json.
try:
host_name = json_parsed['host']
user_name = json_parsed['user']
pass_word = json_parsed['pass']
except TypeError:
logger.error('JSON data not a dictionary.')
raise HandledError
except KeyError as exc:
logger.error('Missing key from JSON dict: %s', exc.message)
raise HandledError
# Catch empty values.
if not all((host_name, user_name, pass_word)):
logger.error('One or more JSON value is empty.')
raise HandledError
return {'tapes': tapes, 'host': host_name, 'user': user_name, 'pass': pass_word}
def main(config):
"""Main function of program.
:param dict config: Parsed command line and config file data.
"""
logger = logging.getLogger('main')
logger.info('Connecting to autoloader and reading tape inventory...')
autoloader = Autoloader(config['host'], config['user'], config['pass'])
autoloader.check_creds()
autoloader.update_inventory()
# Purge missing tapes.
tapes = list()
for tape in reversed(config['tapes']):
if tape not in autoloader.inventory:
logger.info('%s not in autoloader, skipping.', tape)
continue
if autoloader.inventory[tape] == 'mailslot':
logger.error('%s already in mailslot, skipping.', tape)
continue
tapes.append(tape)
    if not tapes:
        logger.info('No tapes to eject. Nothing to do.')
        return
    total = len(tapes)
    while tapes:
        # Make sure mailslot is clear.
        if 'mailslot' in autoloader.inventory.values():
            logger.info('Tape in mailslot, remove to continue...')
            autoloader.update_inventory()
            continue
        # Eject.
        tape = tapes.pop()
        left = len(tapes)
        logger.info('Ejecting %s (%d other%s left)...', tape, left, '' if left == 1 else 's')
        autoloader.eject(tape)
    logger.info('Ejected %d tape%s.', total, '' if total == 1 else 's')
if __name__ == '__main__':
signal.signal(signal.SIGINT, lambda *_: getattr(os, '_exit')(0)) # Properly handle Control+C.
try:
main(combine_config(setup_logging(get_arguments())))
except HandledError:
logging.critical('EXITING DUE TO ERROR!')
sys.exit(1)
logging.info('Success.')
|
|
from django.forms import widgets
from requests.exceptions import ConnectionError
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.core.cache import cache
from django.db.models import Count
from onadata.apps.logger.models import XForm, Instance
from onadata.libs.permissions import get_object_users_with_permissions
from onadata.libs.serializers.fields.boolean_field import BooleanField
from onadata.libs.serializers.tag_list_serializer import TagListSerializer
from onadata.libs.serializers.metadata_serializer import MetaDataSerializer
from onadata.libs.utils.decorators import check_obj
from onadata.libs.utils.viewer_tools import enketo_url, EnketoError
from onadata.libs.utils.viewer_tools import _get_form_url
from onadata.apps.main.views import get_enketo_preview_url
from onadata.apps.main.models.meta_data import MetaData
from onadata.libs.utils.cache_tools import (XFORM_PERMISSIONS_CACHE,
ENKETO_URL_CACHE,
ENKETO_PREVIEW_URL_CACHE,
XFORM_METADATA_CACHE,
XFORM_DATA_VERSIONS)
class XFormSerializer(serializers.HyperlinkedModelSerializer):
formid = serializers.Field(source='id')
metadata = serializers.SerializerMethodField('get_xform_metadata')
owner = serializers.HyperlinkedRelatedField(view_name='user-detail',
source='user',
lookup_field='username')
created_by = serializers.HyperlinkedRelatedField(view_name='user-detail',
source='created_by',
lookup_field='username')
public = BooleanField(source='shared', widget=widgets.CheckboxInput())
public_data = BooleanField(source='shared_data')
require_auth = BooleanField(source='require_auth',
widget=widgets.CheckboxInput())
submission_count_for_today = serializers.Field(
source='submission_count_for_today')
tags = TagListSerializer(read_only=True)
title = serializers.CharField(max_length=255, source='title')
url = serializers.HyperlinkedIdentityField(view_name='xform-detail',
lookup_field='pk')
users = serializers.SerializerMethodField('get_xform_permissions')
enketo_url = serializers.SerializerMethodField('get_enketo_url')
enketo_preview_url = serializers.SerializerMethodField(
'get_enketo_preview_url')
instances_with_geopoints = serializers.SerializerMethodField(
'get_instances_with_geopoints')
num_of_submissions = serializers.SerializerMethodField(
'get_num_of_submissions')
form_versions = serializers.SerializerMethodField(
'get_xform_versions')
class Meta:
model = XForm
read_only_fields = (
'json', 'xml', 'date_created', 'date_modified', 'encrypted',
'bamboo_dataset', 'last_submission_time')
exclude = ('id', 'json', 'xml', 'xls', 'user', 'has_start_time',
'shared', 'shared_data', 'deleted_at')
def get_num_of_submissions(self, obj):
if obj.num_of_submissions != obj.instances.filter(
deleted_at__isnull=True).count():
obj.submission_count(force_update=True)
return obj.num_of_submissions
def get_instances_with_geopoints(self, obj):
if not obj.instances_with_geopoints and obj.instances.exclude(
geom=None).count() > 0:
obj.instances_with_geopoints = True
obj.save()
return obj.instances_with_geopoints
def get_xform_permissions(self, obj):
if obj:
xform_perms = cache.get(
'{}{}'.format(XFORM_PERMISSIONS_CACHE, obj.pk))
if xform_perms:
return xform_perms
xform_perms = get_object_users_with_permissions(obj)
cache.set(
'{}{}'.format(XFORM_PERMISSIONS_CACHE, obj.pk), xform_perms)
return xform_perms
return []
def get_enketo_url(self, obj):
if obj:
_enketo_url = cache.get(
'{}{}'.format(ENKETO_URL_CACHE, obj.pk))
if _enketo_url:
return _enketo_url
try:
metadata = MetaData.objects.get(
xform=obj, data_type="enketo_url")
except MetaData.DoesNotExist:
request = self.context.get('request')
form_url = _get_form_url(request, obj.user.username)
url = ""
try:
url = enketo_url(form_url, obj.id_string)
MetaData.enketo_url(obj, url)
except (EnketoError, ConnectionError):
pass
cache.set('{}{}'.format(ENKETO_URL_CACHE, obj.pk), url)
return url
_enketo_url = metadata.data_value
cache.set('{}{}'.format(ENKETO_URL_CACHE, obj.pk), _enketo_url)
return _enketo_url
return None
def get_enketo_preview_url(self, obj):
if obj:
_enketo_preview_url = cache.get(
'{}{}'.format(ENKETO_PREVIEW_URL_CACHE, obj.pk))
if _enketo_preview_url:
return _enketo_preview_url
try:
metadata = MetaData.objects.get(
xform=obj, data_type="enketo_preview_url")
except MetaData.DoesNotExist:
request = self.context.get('request')
preview_url = ""
try:
preview_url = get_enketo_preview_url(request,
obj.user.username,
obj.id_string)
MetaData.enketo_preview_url(obj, preview_url)
except EnketoError:
pass
cache.set('{}{}'.format(ENKETO_PREVIEW_URL_CACHE, obj.pk),
preview_url)
return preview_url
_enketo_preview_url = metadata.data_value
            cache.set(
                '{}{}'.format(ENKETO_PREVIEW_URL_CACHE, obj.pk),
                _enketo_preview_url)
return _enketo_preview_url
return None
def get_xform_metadata(self, obj):
if obj:
xform_metadata = cache.get(
'{}{}'.format(XFORM_METADATA_CACHE, obj.pk))
if xform_metadata:
return xform_metadata
xform_metadata = MetaDataSerializer(
obj.metadata_set.all(),
many=True,
context=self.context).data
cache.set(
'{}{}'.format(XFORM_METADATA_CACHE, obj.pk), xform_metadata)
return xform_metadata
return []
def get_xform_versions(self, obj):
if obj:
versions = cache.get('{}{}'.format(XFORM_DATA_VERSIONS, obj.pk))
if versions:
return versions
versions = Instance.objects.filter(xform=obj)\
.values('version')\
.annotate(total=Count('version'))
if versions:
cache.set('{}{}'.format(XFORM_DATA_VERSIONS, obj.pk),
list(versions))
return versions
return []
class XFormListSerializer(serializers.Serializer):
formID = serializers.Field(source='id_string')
name = serializers.Field(source='title')
majorMinorVersion = serializers.SerializerMethodField('get_version')
version = serializers.SerializerMethodField('get_version')
hash = serializers.SerializerMethodField('get_hash')
descriptionText = serializers.Field(source='description')
downloadUrl = serializers.SerializerMethodField('get_url')
manifestUrl = serializers.SerializerMethodField('get_manifest_url')
def get_version(self, obj):
return None
@check_obj
def get_hash(self, obj):
return u"md5:%s" % obj.hash
@check_obj
def get_url(self, obj):
kwargs = {'pk': obj.pk, 'username': obj.user.username}
request = self.context.get('request')
return reverse('download_xform', kwargs=kwargs, request=request)
@check_obj
def get_manifest_url(self, obj):
kwargs = {'pk': obj.pk, 'username': obj.user.username}
request = self.context.get('request')
return reverse('manifest-url', kwargs=kwargs, request=request)
class XFormManifestSerializer(serializers.Serializer):
filename = serializers.Field(source='data_value')
hash = serializers.SerializerMethodField('get_hash')
downloadUrl = serializers.SerializerMethodField('get_url')
@check_obj
def get_url(self, obj):
kwargs = {'pk': obj.xform.pk,
'username': obj.xform.user.username,
'metadata': obj.pk}
request = self.context.get('request')
format = obj.data_value[obj.data_value.rindex('.') + 1:]
return reverse('xform-media', kwargs=kwargs,
request=request, format=format.lower())
@check_obj
def get_hash(self, obj):
return u"%s" % (obj.file_hash or 'md5:')
|
|
import datetime as dt
import pandas as pd
from hyperparameters import load_parameters
from commands import train
def hyperscan(x,
y,
data,
epochs,
flatten,
dropout,
batch_sizes,
batch_sizes_step,
layers,
layers_step,
activation_out,
neuron_max,
losses,
optimizers,
activations,
shapes):
'''
    mode = 'auto' scans through all known values of a parameter,
    while 'selective' scans only the values you supply.
    In selective mode, set the parameters you do not want to scan
    to fixed values and leave the rest on 'auto'; only the 'auto'
    parameters are scanned.
    Each input can be either a string (a single value for that
    parameter) or a list (multiple values).
'''
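    # A hypothetical 'selective' call (all argument values are placeholders):
    # losses and optimizers are fixed, while activations, shapes, layers and
    # batch sizes stay on 'auto' and are scanned.
    #
    #   df = hyperscan(x, y, data,
    #                  epochs=50, flatten=True, dropout=0.2,
    #                  batch_sizes='auto', batch_sizes_step=2,
    #                  layers='auto', layers_step=2,
    #                  activation_out='sigmoid', neuron_max=50,
    #                  losses='binary_crossentropy',
    #                  optimizers=['adam', 'nadam'],
    #                  activations='auto', shapes='auto')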
temp_list = []
    if losses == 'auto':
losses = load_parameters('losses')
elif type(losses) is str:
losses = [losses]
    if activations == 'auto':
activations = load_parameters('activations')
elif type(activations) is str:
activations = [activations]
    if optimizers == 'auto':
optimizers = load_parameters('optimizers')
elif type(optimizers) is str:
optimizers = [optimizers]
    if shapes == 'auto':
shapes = load_parameters('shapes')
elif type(shapes) is str:
shapes = [shapes]
    if layers == 'auto':
layers = range(2, 15, layers_step)
elif type(layers) is int:
layers = [layers]
elif type(layers) is list:
layers = range(layers[0], layers[1], layers_step)
    if batch_sizes == 'auto':
batch_sizes = range(2, 15, batch_sizes_step)
elif type(batch_sizes) is int:
batch_sizes = [batch_sizes]
elif type(batch_sizes) is list:
batch_sizes = range(batch_sizes[0], batch_sizes[1], batch_sizes_step)
a = len(losses)
b = len(shapes)
c = len(activations)
d = len(optimizers)
e = len(batch_sizes)
f = len(layers)
no_of_tries = a * b * c * d * e * f
start_time = dt.datetime.now()
print("Total tries in this scan: %d" % no_of_tries)
print("Scan started on: %s" % start_time.strftime('%H:%M'))
column_list = ['train_acc', 'train_acc_mean', 'train_acc_min',
'train_acc_max', 'train_acc_std', 'train_loss',
'train_loss_mean', 'train_loss_min', 'train_loss_max',
'train_loss_std', 'test_acc', 'test_acc_mean',
'test_acc_min', 'test_acc_max', 'test_acc_std', 'test_loss',
'test_loss_mean', 'test_loss_min', 'test_loss_max',
'test_loss_std', 'shape', 'activation', 'activation_out',
'loss', 'optimizer', 'epochs', 'layers', 'features',
'dropout', 'batch_size', 'max_neurons', 'network_scale']
counter = 0
for loss in losses:
for activation in activations:
for optimizer in optimizers:
for shape in shapes:
for layer in layers:
for batch_size in batch_sizes:
counter += 1
temp = train(x,
y,
data,
epoch=epochs,
flatten=flatten,
dropout=dropout,
layers=layer,
batch_size=batch_size,
activation_out=activation_out,
neuron_max=neuron_max,
hyperscan=True,
loss=loss,
activation=activation,
optimizer=optimizer,
shape=shape)
out = _data_prep(temp)
temp_list.append(out)
if counter == 1:
try_time = dt.datetime.now()
temp = (try_time - start_time) * no_of_tries
finish_est = temp + start_time
finish_est = finish_est.strftime('%H:%M')
print("Estimated finish: %s" % finish_est)
# creating a backup to a file every 50 tries
if counter % 50 == 0:
backup_to_csv = _to_df(temp_list, column_list)
backup_to_csv.to_csv('hyperscan.csv')
                            print('tries left: %d' % (no_of_tries - counter))
df = _to_df(temp_list, column_list)
return df
def _to_df(data, cols):
'''Dataframe maker
Takes the input of the scan and puts it in to
a dataframe. This is to avoid having to use
the same code twice.
'''
df = pd.DataFrame(data)
df.columns = cols
return df
def _data_prep(data):
'''
Prepares the data for appending to dataframe round by round.
'''
a = data[1][-10:]['train_acc'].median()
b = data[1][-10:]['train_acc'].mean()
c = data[1]['train_acc'].min()
d = data[1]['train_acc'].max()
e = data[1][-10:]['train_acc'].std()
f = data[1][-10:]['train_loss'].median()
g = data[1][-10:]['train_loss'].mean()
h = data[1]['train_loss'].min()
i = data[1]['train_loss'].max()
j = data[1][-10:]['train_loss'].std()
k = data[1][-10:]['test_acc'].median()
l = data[1][-10:]['test_acc'].mean()
m = data[1]['test_acc'].min()
n = data[1]['test_acc'].max()
o = data[1][-10:]['test_acc'].std()
p = data[1][-10:]['test_loss'].median()
q = data[1][-10:]['test_loss'].mean()
r = data[1]['test_loss'].min()
s = data[1]['test_loss'].max()
t = data[1][-10:]['test_loss'].std()
u = data[0]['shape']
v = data[2]['activation']
w = data[2]['activation_out']
x = data[2]['loss']
y = data[2]['optimizer']
z = data[0]['epochs']
aa = data[0]['layers']
ab = data[0]['features']
ac = data[0]['dropout']
ad = data[0]['batch_size']
ae = data[0]['max_neurons']
af = data[0]['network_scale']
out = [a, b, c, d, e, f, g, h, i, j, k, l, m, n,
o, p, q, r, s, t, u, v, w, x, y, z, aa, ab, ac, ad, ae, af]
return out
|
|
# Interface for Rabbit MQ to intercommunicate with ARGO jobs
import sys, os, ssl
import time
#import logging
from pUtil import tolog
try:
import pika
except ImportError:
tolog("pika module does not exist - ARGO interface will fail")
#logger = logging.getLogger(__name__)
#logging.getLogger('pika').setLevel(logging.WARNING)
#logging.getLogger('select_connection').setLevel(logging.DEBUG)
class MessageInterface:
def __init__(self,
username = '',
password = '',
host = '',
port = -1,
virtual_host = '/',
socket_timeout = 120,
exchange_name = '',
exchange_type = 'topic',
exchange_durable = True,
exchange_auto_delete = False,
ssl_cert = '',
ssl_key = '',
ssl_ca_certs = '',
queue_is_durable = True,
queue_is_exclusive = False,
queue_is_auto_delete = True,
):
self.username = username
self.password = password
self.host = host
self.port = port
self.virtual_host = virtual_host
self.socket_timeout = socket_timeout
self.exchange_name = exchange_name
self.exchange_type = exchange_type
self.exchange_durable = exchange_durable
self.exchange_auto_delete = exchange_auto_delete
self.queue_is_durable = queue_is_durable
self.queue_is_exclusive = queue_is_exclusive
self.queue_is_auto_delete = queue_is_auto_delete
self.ssl_cert = ssl_cert
self.ssl_key = ssl_key
self.ssl_ca_certs = ssl_ca_certs
self.credentials = None
self.parameters = None
self.connection = None
self.channel = None
def open_blocking_connection(self):
tolog("MQ ARGO: open blocking connection")
self.create_connection_parameters()
# open the connection and grab the channel
try:
self.connection = pika.BlockingConnection(self.parameters)
except:
tolog('MQ ARGO: Exception received while trying to open blocking connection to message server: ' + str(sys.exc_info()))
raise
try:
self.channel = self.connection.channel()
except:
tolog('MQ ARGO: Exception received while trying to open a channel to the message server: ' + str(sys.exc_info()))
raise
tolog("MQ ARGO: create exchange, name = " + self.exchange_name)
# make sure exchange exists (doesn't do anything if already created)
self.channel.exchange_declare(
exchange = self.exchange_name,
exchange_type = self.exchange_type,
durable = self.exchange_durable,
auto_delete = self.exchange_auto_delete,
)
def open_select_connection(self,
on_open_callback = None,
on_open_error_callback = None,
on_close_callback = None,
stop_ioloop_on_close = True,
):
tolog("MQ ARGO: create select connection")
self.create_connection_parameters()
# open the connection
if on_open_callback is not None:
try:
self.connection = pika.SelectConnection(self.parameters,
on_open_callback,
on_open_error_callback,
on_close_callback,
stop_ioloop_on_close,
)
except:
tolog('MQ ARGO: Exception received while trying to open select connection to message server: ' + str(sys.exc_info()))
raise
def create_connection_parameters(self):
tolog("MQ ARGO: create connection parameters, server = " + self.host + " port = " + str(self.port))
# need to set credentials to login to the message server
#self.credentials = pika.PlainCredentials(self.username,self.password)
self.credentials = pika.credentials.ExternalCredentials()
ssl_options_dict = {
"certfile": self.ssl_cert,
"keyfile": self.ssl_key,
"ca_certs": self.ssl_ca_certs,
"cert_reqs": ssl.CERT_REQUIRED,
}
#logger.debug(str(ssl_options_dict))
# setup our connection parameters
self.parameters = pika.ConnectionParameters(
host = self.host,
port = self.port,
virtual_host = self.virtual_host,
credentials = self.credentials,
socket_timeout = self.socket_timeout,
ssl = True,
ssl_options = ssl_options_dict,
)
def create_queue(self,name,routing_key):
# declare a random queue which this job will use to receive messages
# durable = survive reboots of the broker
# exclusive = only current connection can access this queue
# auto_delete = queue will be deleted after connection is closed
self.channel.queue_declare(
queue = str(name),
durable = self.queue_is_durable,
exclusive = self.queue_is_exclusive,
auto_delete = self.queue_is_auto_delete
)
# now bind this queue to the exchange, using a routing key
# any message submitted to the exchange with the
# routing key will appear on this queue
self.channel.queue_bind(exchange=self.exchange_name,
queue=str(name),
routing_key=str(routing_key)
)
def close(self):
self.channel.close()
self.connection.close()
#self.channel = None
#self.connection = None
def send_msg(self,
message_body,
routing_key,
exchange_name = None,
message_headers = {},
                 priority = 0,       # default priority
                 delivery_mode = 2,  # delivery_mode=2 makes the message persistent
):
if exchange_name is None:
exchange_name = self.exchange_name
timestamp = time.time()
# create the message properties
properties = pika.BasicProperties(
delivery_mode = delivery_mode,
priority = priority,
timestamp = timestamp,
headers = message_headers,
)
tolog("MQ ARGO: sending message body:\n" + str(message_body))
tolog('MQ ARGO: sending message to exchange: ' + self.exchange_name)
tolog('MQ ARGO: sending message with routing key: ' + routing_key)
self.channel.basic_publish(
exchange = exchange_name,
routing_key = routing_key,
body = message_body,
properties = properties,
)
def receive_msg(self,queue_name, no_ack = False):
# retrieve one message
method, properties, body = self.channel.basic_get(queue=queue_name, no_ack=no_ack)
return method,properties,body
def purge_queue(self,queue_name):
self.channel.queue_purge(queue = queue_name)
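# Minimal usage sketch (not part of the pilot flow): the broker host, port,
# exchange name, routing keys and certificate paths below are placeholders and
# assume a RabbitMQ server configured for SSL client-certificate authentication.
if __name__ == '__main__':
    mq = MessageInterface(host='rabbitmq.example.com',
                          port=5671,
                          exchange_name='argo_jobs',
                          ssl_cert='/path/to/client_cert.pem',
                          ssl_key='/path/to/client_key.pem',
                          ssl_ca_certs='/path/to/ca_certs.pem')
    mq.open_blocking_connection()
    mq.create_queue('argo_test_queue', 'argo.test.#')
    mq.send_msg('{"state": "created"}', 'argo.test.status')
    method, properties, body = mq.receive_msg('argo_test_queue', no_ack=True)
    tolog('MQ ARGO: example received body: ' + str(body))
    mq.close()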
|
|
import os
import sys
import utils
import logging
import traceback
import datetime
import hash_version
import copy
import fnmatch
from sets import Set
class InvalidPlatform(Exception):
def __init__(self, plat):
self._plat = plat
def __str__(self):
return "Invalid platform detected: %s" % self._plat
class DontExistsFile(Exception):
def __init__(self, source_filename):
self._source_filename = source_filename
def __str__(self):
        return 'File does not exist: %s' % self._source_filename
class FailPrepare(Exception):
def __init__(self, node):
self._node = node
def __str__(self):
        return ('Failed preparing package: %s' % self._node.get_package_name())
class AmbiguationLibs(Exception):
def __init__(self, kind, package, build_mode):
self._kind = kind
self._package = package
self._build_mode = build_mode
def __str__(self):
return "Ambiguation in %s in %s. Mode: %s. Candidates:" % (self._kind, self._package, self._build_mode)
class NotFoundInDataset(Exception):
def __init__(self, msg):
self._msg = msg
def __str__(self):
return "%s" % self._msg
class FailThirdParty(Exception):
def __init__(self, msg):
self._msg = msg
def __str__(self):
return "%s" % self._msg
class Found(Exception):
pass
def prepare_cmakefiles(cmakefiles):
if not os.path.isdir(cmakefiles):
cmakefiles_temp = cmakefiles + '.tmp'
logging.info("preparing cmaki: {}".format(cmakefiles))
logging.info("clone cmaki: {}".format(cmakefiles_temp))
utils.tryremove_dir(cmakefiles_temp)
utils.safe_system('git clone %s %s' % (CMAKELIB_URL, cmakefiles_temp))
utils.move_folder_recursive(cmakefiles_temp, cmakefiles)
utils.tryremove_dir(cmakefiles_temp)
def get_identifier(mode):
env = os.environ.copy()
cmaki_install = env['CMAKI_INSTALL']
if utils.is_windows():
script_identifier = os.path.join(cmaki_install, 'cmaki_identifier.exe')
else:
script_identifier = os.path.join(cmaki_install, 'cmaki_identifier.sh')
if not os.path.isfile(script_identifier):
raise Exception("there is no {} script".format(script_identifier))
env['CMAKI_INFO'] = mode
return list(utils.get_stdout(script_identifier, env=env))[0]
def search_fuzzy(data, fuzzy_key, fallback='default'):
for key in data:
if fnmatch.fnmatch(fuzzy_key, key):
return data[key]
else:
if fallback in data:
return data[fallback]
else:
logging.error("not found 'default' platform or %s" % fuzzy_key)
raise Exception("not found '{}'".format(fuzzy_key))
#
# INMUTABLE GLOBALS
#
# HTTP_URL_NPSERVER = 'http://artifacts.myftp.biz:8080/sources'
HTTP_URL_NPSERVER = 'http://192.168.0.4:8080/sources'
CMAKELIB_URL='https://github.com/makiolo/cmaki.git'
prefered = {}
prefered['Debug'] = ['Debug', 'RelWithDebInfo', 'Release']
prefered['RelWithDebInfo'] = ['RelWithDebInfo', 'Release', 'Debug']
prefered['Release'] = ['Release', 'RelWithDebInfo', 'Debug']
magic_invalid_file = '__not_found__'
exceptions_fail_group = (OSError, IOError, )
exceptions_fail_program = (KeyboardInterrupt, )
uncompress_strip_default = '.'
uncompress_prefix_default = '.'
priority_default = 50
build_unittests_foldername = 'unittest'
# detect platform
platform = get_identifier('ALL')
arch = get_identifier('ARCH')
operative_system = get_identifier('OS')
somask_id = operative_system[0]
archs = {platform: arch}
platforms = [platform]
logging.info('Detecting platform from script like: {} / {}'.format(platform, arch))
alias_priority_name = { 10: 'minimal',
20: 'tools',
30: 'third_party' }
alias_priority_name_inverse = {v: k for k, v in alias_priority_name.items()}
def is_valid(package_name, mask):
return (mask.find(somask_id) != -1) and (package_name != 'dummy')
def is_blacklisted(blacklist_file, no_blacklist, package_name):
blacklisted = False
if os.path.exists(blacklist_file):
with open(blacklist_file, 'rt') as f:
for line in f.readlines():
if line.strip() == package_name:
blacklisted = True
break
    # --no-blacklist can cancel the effect of the blacklist
if blacklisted and (package_name in no_blacklist):
blacklisted = False
return blacklisted
class ThirdParty:
def __init__(self, user_parameters, name, parameters):
self.user_parameters = user_parameters
self.name = name
self.parameters = parameters
self.depends = []
self.exceptions = []
self.interrupted = False
self.ret = 0 # Initial return code
self.fail_stage = ""
self.blacklisted = is_blacklisted(self.user_parameters.blacklist, self.user_parameters.no_blacklist, self.get_package_name())
        # publish the blacklist notice only once
self.published_invalidation = False
def __hash__(self):
return hash((self.get_package_name(), self.get_priority(), self.get_mask()))
def __eq__(self, other):
return (self.get_package_name() == other.get_package_name()) and (self.get_priority() == other.get_priority()) and (self.get_mask() == other.get_mask())
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s (%s)" % (self.get_package_name(), self.get_mask())
def __str__(self):
return "%s (%s)" % (self.get_package_name(), self.get_mask())
def get_uncompress_strip(self, pos = 0):
parms = self.parameters
try:
if isinstance(parms['uncompress_strip'], list):
return parms['uncompress_strip'][pos]
else:
return parms['uncompress_strip']
except KeyError:
# default value
return uncompress_strip_default
def get_uncompress_prefix(self, pos = 0):
parms = self.parameters
try:
if isinstance(parms['uncompress_prefix'], list):
return parms['uncompress_prefix'][pos]
else:
return parms['uncompress_prefix']
except KeyError:
# default value
return uncompress_prefix_default
def get_uncompress(self, pos = 0):
parms = self.parameters
try:
if parms['uncompress'] is not None:
if isinstance(parms['uncompress'], list):
return (parms['uncompress'][pos].find(somask_id) != -1)
else:
return (parms['uncompress'].find(somask_id) != -1)
else:
return False
except KeyError:
# default value
return True
def get_depends_raw(self):
return self.depends
def get_depends(self):
parms = self.parameters
try:
return parms['depends']
except KeyError:
# default value
return None
def get_generate_custom_script(self, source_dir):
path_build = self.get_path_custom_script(source_dir, name='.build')
build_content = self.get_build_script_content()
if build_content is not None:
with open(path_build, 'wt') as f:
f.write(build_content)
def get_path_custom_script(self, source_folder, name = 'build'):
if utils.is_windows():
path_build = os.path.join(source_folder, name + '.cmd')
else:
path_build = os.path.join(source_folder, name + '.sh')
return path_build
def has_custom_script(self, source_folder):
script_custom = os.path.exists( self.get_path_custom_script(source_folder) )
return (self.get_build_script_content() is not None) or script_custom
def get_build_script_content(self):
parms = self.parameters
try:
if not utils.is_windows():
return parms['build']
else:
return parms['build_windows']
except KeyError:
# default value
return None
def get_source(self):
parms = self.parameters
try:
source = parms['source']
if source is not None:
if not isinstance(source, list):
return [source]
else:
return source
else:
return []
except KeyError:
# default value
return []
def get_source_filename(self, position=0):
parms = self.parameters
try:
return parms['source_filename']
except KeyError:
# default value
source = self.get_source()[position]
filename = source.split('/')[-1]
return filename
def get_sources_all(self, position=0):
parms = self.parameters
try:
return parms['sources_all']
except KeyError:
return False
def get_before_copy(self):
parms = self.parameters
try:
return parms['before_copy']
except KeyError:
# default value
return []
def get_short_path(self):
parms = self.parameters
try:
return parms['short_path']
except KeyError:
# default value
return False
def has_library(self, platform_info):
package = self.get_package_name()
return ((('static' in platform_info) and (package != 'dummy')) or (('dynamic' in platform_info) and (package != 'dummy')))
def needs(self, node):
if node.is_valid():
self.depends.append(node)
def get_package_name(self):
return self.name
def get_package_name_norm(self):
package = self.get_package_name()
for c in '-\\/:*?"<>|':
package = package.replace(c, '_')
return package
def get_package_name_norm_upper(self):
package_norm = self.get_package_name_norm()
return package_norm.upper()
def set_version(self, newversion):
self.parameters['version'] = newversion
def get_version(self):
parms = self.parameters
try:
version = parms['version']
if version is None:
return '0.0.0.0'
else:
return version
except KeyError:
if self.get_package_name() != 'dummy':
raise Exception('[%s] Version is a mandatory field.' % self.get_package_name())
def get_version_manager(self):
parms = self.parameters
try:
version = self.get_version()
if version == '0.0.0.0':
return parms['version_manager']
else:
                # if it has a real version -> do not use git-based renaming
return None
except KeyError:
return None
def get_cmake_target(self):
parms = self.parameters
try:
return parms['cmake_target']
except KeyError:
return 'install'
def get_post_install(self):
parms = self.parameters
try:
return parms['post_install']
except KeyError:
return []
def get_priority(self):
parms = self.parameters
try:
return int(parms['priority'])
except KeyError:
return priority_default
def is_packing(self):
parms = self.parameters
try:
return parms['packing']
except KeyError:
# default value
return True
def get_branch(self):
try:
return self.parameters['branch']
except KeyError:
# default value
return None
def get_build_modes(self):
parms = self.parameters
build_modes = []
try:
if 'MODE' in os.environ and (os.environ['MODE'] != 'UNDEFINED'):
build_modes.append(os.environ['MODE'])
else:
mode = parms['mode']
if mode.find('d') != -1:
build_modes.append('Debug')
if mode.find('i') != -1:
build_modes.append('RelWithDebInfo')
if mode.find('r') != -1:
build_modes.append('Release')
except KeyError:
# no mode provided
build_modes.append('Debug')
build_modes.append('RelWithDebInfo')
build_modes.append('Release')
return build_modes
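    # Illustrative results of get_build_modes() (parameter values are hypothetical):
    #   parameters['mode'] = 'dr' -> ['Debug', 'Release']
    #   parameters['mode'] = 'i'  -> ['RelWithDebInfo']
    #   no 'mode' key             -> ['Debug', 'RelWithDebInfo', 'Release']
    # Setting MODE=Release in the environment overrides all of the above with ['Release'].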
def get_mask(self):
parms = self.parameters
try:
return parms['mask']
except KeyError:
return somask_id
def is_valid(self):
if self.blacklisted:
if (not self.published_invalidation):
logging.debug('%s is not built because is blacklisted in %s' % (self.get_package_name(), os.path.basename(self.user_parameters.blacklist)))
self.published_invalidation = True
return False
return is_valid(self.get_package_name(), self.get_mask())
def resolver(self, resolved, seen):
seen.append(self)
for edge in self.depends:
if edge not in resolved:
if edge in seen:
raise Exception('Circular reference detected: %s and %s' % (self.get_package_name(), edge.name))
edge.resolver(resolved, seen)
if self.is_valid():
resolved.append(self)
seen.remove(self)
def get_targets(self):
try:
return self.parameters['targets']
except KeyError:
# default value
return []
def get_exclude_from_all(self):
parms = self.parameters
try:
return parms['exclude_from_all']
except KeyError:
# default value
return False
def get_exclude_from_clean(self):
parms = self.parameters
try:
return parms['exclude_from_clean']
except KeyError:
# default value
return False
def get_install(self):
parms = self.parameters
try:
return parms['install']
except KeyError:
# default value
return True
def get_unittest(self):
parms = self.parameters
try:
return parms['unittest']
except KeyError:
# default value
return None
def get_cmake_prefix(self):
parms = self.parameters
try:
cmake_prefix = parms['cmake_prefix']
if cmake_prefix.endswith('CMakeLists.txt'):
return os.path.dirname(cmake_prefix)
return cmake_prefix
except KeyError:
# default value
return "."
def get_generator_targets(self, plat, compiler_c, compiler_cpp, ext_sta, ext_dyn):
'''
TODO: create new class "target"
'''
superpackage = self.get_package_name_norm()
for targets in self.get_targets():
for target_name in targets:
platform_info = None
platform_extra = None
target_info = targets[target_name]
if 'info' in target_info:
outputinfo = search_fuzzy(target_info['info'], plat)
if outputinfo is not None:
platform_info = copy.deepcopy( outputinfo )
if 'extra' in target_info:
outputinfo_extra = search_fuzzy(target_info['extra'], plat)
if outputinfo_extra is not None:
platform_extra = copy.deepcopy( outputinfo_extra )
if (platform_info is not None) and (platform_extra is not None):
platform_info = utils.smart_merge(platform_info, platform_extra)
# variables for use in "info" and "extra"
platform_info = utils.apply_replaces_vars(platform_info, {
'TARGET': target_name,
'TARGET_UPPER': target_name.upper(),
'PACKAGE': superpackage,
'PACKAGE_UPPER': superpackage.upper(),
'PLATFORM': plat,
'COMPILER': os.path.basename(compiler_cpp),
'EXT_DYN': ext_dyn,
'EXT_STA': ext_sta,
'ARCH': archs[plat],
})
if platform_info is None:
logging.error('No platform info in package %s, platform %s' % (superpackage, plat))
logging.error("%s" % targets)
sys.exit(1)
yield (target_name, platform_info)
def have_any_in_target(self, plat, key, compiler_replace_maps):
any_static = False
for compiler_c, compiler_cpp, _, ext_sta, ext_dyn, _, _ in self.compiler_iterator(plat, compiler_replace_maps):
for package, platform_info in self.get_generator_targets(plat, compiler_c, compiler_cpp, ext_sta, ext_dyn):
if key in platform_info:
any_static = True
return any_static
def get_generate_find_package(self):
parms = self.parameters
try:
return parms['generate_find_package']
except KeyError:
# default value
return True
def compiler_iterator(self, plat, compiler_replace_maps):
plat_parms = search_fuzzy(self.parameters['platforms'], plat)
try:
generator = plat_parms['generator']
except KeyError:
generator = None
try:
compilers = plat_parms['compiler']
except KeyError:
compilers = None
# resolve map
compiler_replace_resolved = {}
for var, value in compiler_replace_maps.iteritems():
newvalue = value
newvalue = newvalue.replace('$PLATFORM', plat)
compiler_replace_resolved[var] = newvalue
compiler_replace_resolved['$ARCH'] = archs[plat]
compiler_replace_resolved['${ARCH}'] = archs[plat]
# get compiler info
compiler = get_identifier('COMPILER')
ext_dyn = plat_parms['ext_dyn']
ext_sta = plat_parms['ext_sta']
if compilers is None:
# if utils.is_windows():
compilers = [('%s, %s' % (compiler, compiler))]
# else:
# compilers = [('%s, %s' % (os.environ.get('CC', 'gcc'), os.environ.get('CXX', 'g++')))]
for compiler in compilers:
compilers_tuple = compiler.split(',')
assert(len(compilers_tuple) == 2)
compiler_c = compilers_tuple[0].strip()
compiler_cpp = compilers_tuple[1].strip()
compiler_c = utils.apply_replaces(compiler_c, compiler_replace_resolved)
compiler_cpp = utils.apply_replaces(compiler_cpp, compiler_replace_resolved)
env_new = {}
env_modified = os.environ.copy()
for env_iter in [env_modified, env_new]:
env_iter['COMPILER'] = str(compiler)
env_iter['PLATFORM'] = str(plat)
env_iter['PACKAGE'] = str(self.get_package_name())
env_iter['VERSION'] = str(self.get_version())
env_iter['ARCH'] = str(archs[plat])
# if (compiler_c != 'default') and (compiler_cpp != 'default'):
# env_iter['CC'] = str(compiler_c)
# env_iter['CXX'] = str(compiler_cpp)
try:
environment = plat_parms['environment']
try:
environment_remove = environment['remove']
for key, values in environment_remove.iteritems():
try:
oldpath = env_iter[key]
except KeyError:
oldpath = ''
uniq_values = Set()
for v in values:
v = utils.apply_replaces(v, compiler_replace_resolved)
uniq_values.add(v)
for v in uniq_values:
oldpath = oldpath.replace(v, '')
env_iter[key] = oldpath
except KeyError:
pass
                    # insert at the front with separator ':'
try:
environment_push_front = environment['push_front']
for key, values in environment_push_front.iteritems():
try:
oldpath = env_iter[key]
except KeyError:
oldpath = ''
uniq_values = Set()
for v in values:
v = utils.apply_replaces(v, compiler_replace_resolved)
uniq_values.add(v)
for v in uniq_values:
if len(oldpath) == 0:
separator = ''
else:
# -L / -I / -R use space
if v.startswith('-'):
separator = ' '
else:
separator = ':'
oldpath = str('%s%s%s' % (v, separator, oldpath))
env_iter[key] = oldpath
except KeyError:
pass
                    # append at the back with separator ' '
try:
environment_flags = environment['flags']
for key, values in environment_flags.iteritems():
try:
oldpath = env_iter[key]
except KeyError:
oldpath = ''
uniq_values = Set()
for v in values:
v = utils.apply_replaces(v, compiler_replace_resolved)
uniq_values.add(v)
for v in uniq_values:
if len(oldpath) == 0:
separator = ''
else:
separator = ' '
oldpath = str('%s%s%s' % (oldpath, separator, v))
env_iter[key] = oldpath
except KeyError:
pass
# insert new environment variables
try:
environment_assign = environment['assign']
for key, value in environment_assign.iteritems():
value = utils.apply_replaces(value, compiler_replace_resolved)
env_iter[key] = value
except KeyError:
pass
except KeyError:
pass
yield (compiler_c, compiler_cpp, generator, ext_sta, ext_dyn, env_modified, env_new)
def remove_cmake3p(self, cmake3p_dir):
package_cmake3p = os.path.join(cmake3p_dir, self.get_base_folder())
logging.debug('Removing cmake3p %s' % package_cmake3p)
if os.path.exists(package_cmake3p):
utils.tryremove_dir(package_cmake3p)
for dep in self.get_depends_raw():
dep.remove_cmake3p(cmake3p_dir)
def get_base_folder(self):
package = self.get_package_name()
version = self.get_version()
return '%s-%s' % (package, version)
def get_workspace(self, plat):
package = self.get_package_name()
version = self.get_version()
return '%s-%s-%s' % (package, version, plat)
def get_build_directory(self, plat, build_mode):
package = self.get_package_name()
version = self.get_version()
if not self.get_short_path():
return '.build_%s-%s-%s_%s' % (package, version, plat, build_mode)
else:
return '.bs_%s%s%s%s' % (package[:3], version[-1:], plat, build_mode)
def get_download_directory(self):
package = self.get_package_name()
return '.download_%s' % package
def get_original_directory(self):
package = self.get_package_name()
return '.download_original_%s' % package
def apply_replace_maps(self, compiler_replace_maps):
package = self.get_package_name()
package_norm = self.get_package_name_norm()
to_package = os.path.abspath(package)
utils.trymkdir(to_package)
with utils.working_directory(to_package):
basedir = os.path.abspath('..')
compiler_replace_maps['$%s_BASE' % package_norm] = os.path.join(basedir, self.get_workspace('$PLATFORM'), self.get_base_folder())
def generate_scripts_headers(self, compiler_replace_maps):
package = self.get_package_name()
package_norm = self.get_package_name_norm()
version = self.get_version()
to_package = os.path.abspath(package)
utils.trymkdir(to_package)
with utils.working_directory(to_package):
basedir = os.path.abspath('..')
# generate find.cmake
build_directory = self.get_build_directory(r"${CMAKI_PLATFORM}", r"${GLOBAL_BUILD_MODE}")
with open('find.cmake', 'wt') as f:
# TODO: remove vars
# ONLY HOME
f.write("SET(%s_VERSION %s CACHE STRING \"Last version compiled ${PACKAGE}\" FORCE)\n" % (package_norm, version))
f.write("file(TO_NATIVE_PATH \"${PACKAGE_BUILD_DIRECTORY}/../%s-%s-${CMAKI_PLATFORM}/%s-%s/${CMAKI_PLATFORM}/include\" %s_INCLUDE)\n" % (package, version, package, version, package_norm))
f.write("file(TO_NATIVE_PATH \"${PACKAGE_BUILD_DIRECTORY}/../%s-%s-${CMAKI_PLATFORM}/%s-%s/${CMAKI_PLATFORM}\" %s_LIBDIR)\n" % (package, version, package, version, package_norm))
f.write("file(TO_NATIVE_PATH \"${PACKAGE_BUILD_DIRECTORY}/../%s\" %s_BUILD)\n" % (build_directory, package_norm))
f.write("SET(%s_INCLUDE ${%s_INCLUDE} CACHE STRING \"Include dir %s\" FORCE)\n" % (package_norm, package_norm, package))
f.write("SET(%s_LIBDIR ${%s_LIBDIR} CACHE STRING \"Libs dir %s\" FORCE)\n" % (package_norm, package_norm, package))
f.write("SET(%s_BUILD ${%s_BUILD} CACHE STRING \"Build dir %s\" FORCE)\n" % (package_norm, package_norm, package))
            # generate find.script / find.cmd
if utils.is_windows():
build_directory = self.get_build_directory("%PLATFORM%", "%BUILD_MODE%")
with open('find.cmd', 'wt') as f:
f.write("set %s_VERSION=%s\n" % (package_norm, version))
f.write("set %s_HOME=%s\%s-%s-%%PLATFORM%%\%s-%s\%%PLATFORM%%\n" % (package_norm, basedir, package, version, package, version))
f.write("set %s_BASE=%s\%s-%s-%%PLATFORM%%\%s-%s\n" % (package_norm, basedir, package, version, package, version))
f.write("set SELFHOME=%s\%%PACKAGE%%-%%VERSION%%-%%PLATFORM%%\%%PACKAGE%%-%%VERSION%%\%%PLATFORM%%\n" % (basedir))
f.write("set SELFBASE=%s\%%PACKAGE%%-%%VERSION%%-%%PLATFORM%%\%%PACKAGE%%-%%VERSION%%\n" % (basedir))
f.write("set %s_BUILD=%s\%s\n" % (package_norm, basedir, build_directory))
f.write(r"md %SELFHOME%")
f.write("\n")
else:
build_directory = self.get_build_directory("${PLATFORM}", "${BUILD_MODE}")
with open('find.script', 'wt') as f:
f.write("#!/bin/bash\n")
f.write("%s_VERSION=%s\n" % (package_norm, version))
f.write("%s_HOME=%s/%s-%s-$PLATFORM/%s-%s/$PLATFORM\n" % (package_norm, basedir, package, version, package, version))
f.write("%s_BASE=%s/%s-%s-$PLATFORM/%s-%s\n" % (package_norm, basedir, package, version, package, version))
f.write("SELFHOME=%s/$PACKAGE-$VERSION-$PLATFORM/$PACKAGE-$VERSION/$PLATFORM\n" % (basedir))
f.write("SELFBASE=%s/$PACKAGE-$VERSION-$PLATFORM/$PACKAGE-$VERSION\n" % (basedir))
f.write("%s_BUILD=%s/%s\n" % (package_norm, basedir, build_directory))
f.write("mkdir -p $SELFHOME\n")
def remove_cmakefiles(self):
utils.tryremove('CMakeCache.txt')
utils.tryremove('cmake_install.cmake')
utils.tryremove('install_manifest.txt')
utils.tryremove_dir('CMakeFiles')
def remove_scripts_headers(self):
package = self.get_package_name()
to_package = os.path.abspath(package)
utils.trymkdir(to_package)
with utils.working_directory(to_package):
utils.tryremove('find.cmake')
utils.tryremove('find.script')
utils.tryremove('find.cmd')
utils.tryremove('.build.sh')
utils.tryremove('.build.cmd')
utils.tryremove_dir_empty(to_package)
def generate_3rdpartyversion(self, output_dir):
package = self.get_package_name()
package_norm_upper = self.get_package_name_norm_upper()
version = self.get_version()
packing = self.is_packing()
if not packing:
logging.debug("package %s, don't need 3rdpartyversion" % package)
return
thirdparty_path = os.path.join(output_dir, '3rdpartyversions')
utils.trymkdir(thirdparty_path)
with utils.working_directory(thirdparty_path):
with open('%s.cmake' % package, 'wt') as f:
f.write('SET(%s_REQUIRED_VERSION %s EXACT)\n' % (package_norm_upper, version))
def _smart_uncompress(self, position, package_file_abs, uncompress_directory, destiny_directory, compiler_replace_maps):
uncompress = self.get_uncompress(position)
uncompress_strip = self.get_uncompress_strip(position)
uncompress_prefix = self.get_uncompress_prefix(position)
if uncompress:
if (uncompress_strip == uncompress_strip_default) and (uncompress_prefix == uncompress_prefix_default):
# case fast (don't need intermediate folder)
ok = utils.extract_file(package_file_abs, destiny_directory, self.get_first_environment(compiler_replace_maps))
else:
source_with_strip = os.path.join(uncompress_directory, uncompress_strip)
destiny_with_prefix = os.path.join(destiny_directory, uncompress_prefix)
ok = utils.extract_file(package_file_abs, uncompress_directory, self.get_first_environment(compiler_replace_maps))
utils.move_folder_recursive(source_with_strip, destiny_with_prefix)
utils.tryremove_dir(source_with_strip)
if not ok:
                raise Exception('Invalid uncompressed package %s - %s' % (self.get_package_name(), package_file_abs))
def _prepare_third_party(self, position, url, build_directory, compiler_replace_maps):
package = self.get_package_name()
source_filename = self.get_source_filename(position)
uncompress_strip = self.get_uncompress_strip(position)
uncompress_prefix = self.get_uncompress_prefix(position)
uncompress = self.get_uncompress(position)
uncompress_directory = self.get_download_directory()
utils.trymkdir(uncompress_directory)
logging.debug('source_filename = %s' % source_filename)
logging.debug('uncompress_strip = %s' % uncompress_strip)
logging.debug('uncompress_prefix = %s' % uncompress_prefix)
logging.debug('uncompress = %s' % uncompress)
# resolve url vars
url = url.replace('$HTTP_URL_NPSERVER', HTTP_URL_NPSERVER)
# files in svn
if(url.startswith('svn://')):
            # strip is not implemented with svn://
utils.tryremove_dir( build_directory )
logging.info('Download from svn: %s' % url)
self.safe_system( 'svn co %s %s' % (url, build_directory), compiler_replace_maps )
# utils.tryremove_dir( os.path.join(build_directory, '.svn') )
elif(url.endswith('.git') or (url.find('github') != -1) or (url.find('bitbucket') != -1)) and not ( url.endswith('.zip') or url.endswith('.tar.gz') or url.endswith('.tar.bz2') or url.endswith('.tgz') or url.endswith('.py') ):
            # strip is not implemented with git://
utils.tryremove_dir( build_directory )
logging.info('Download from git: %s' % url)
branch = self.get_branch()
extra_cmd = ''
if branch is not None:
                logging.info('cloning to branch %s' % branch)
extra_cmd = '%s' % branch
self.safe_system('git clone --depth=200 %s %s' % (url, build_directory), compiler_replace_maps)
# self.safe_system('git clone %s %s' % (url, build_directory), compiler_replace_maps)
with utils.working_directory(build_directory):
self.safe_system('git checkout {}'.format(extra_cmd), compiler_replace_maps)
self.safe_system('git submodule init', compiler_replace_maps)
self.safe_system('git submodule update', compiler_replace_maps)
# depends_file = self.user_parameters.depends
# if depends_file is not None:
# with utils.working_directory(build_directory):
        # # read the dependencies file
# if os.path.exists(depends_file):
# data = utils.deserialize(depends_file)
# else:
# data = {}
#
        # # obey it if it contains something useful
# if package in data:
# logging.debug('data package version is %s' % data[package])
# try:
# git_version = hash_version.to_git_version(build_directory, data[package])
# logging.debug('data package in git version is %s' % git_version)
# logging.debug('updating to revision %s' % git_version)
# self.safe_system('git reset --hard %s' % git_version, compiler_replace_maps)
# except AssertionError:
# logging.info('using HEAD')
#
        # # update and rewrite
# revision = hash_version.get_last_version(build_directory)
# assert(len(revision) > 0)
# data[package] = revision
# utils.serialize(data, depends_file)
# else:
# logging.warning('not found depends file, using newest changeset')
# file in http
elif ( url.startswith('http://')
or url.startswith('https://')
or url.endswith('.zip')
or url.endswith('.tar.gz')
or url.endswith('.tar.bz2')
or url.endswith('.tgz')
or url.endswith('.py') ):
logging.info('Download from url: %s' % url)
# download to source_filename
package_file_abs = os.path.join(uncompress_directory, source_filename)
utils.download_from_url(url, package_file_abs)
if os.path.isfile(package_file_abs):
# uncompress in download folder for after generate a patch with all changes
if not os.path.isdir( self.get_original_directory() ):
utils.trymkdir( self.get_original_directory() )
logging.debug('preparing original uncompress')
# uncompress in original
self._smart_uncompress(position, package_file_abs, uncompress_directory, self.get_original_directory(), compiler_replace_maps)
else:
logging.debug('skipping original uncompress (already exists)')
# uncompress in intermediate build directory
self._smart_uncompress(position, package_file_abs, uncompress_directory, build_directory, compiler_replace_maps)
else:
raise DontExistsFile(source_filename)
else:
raise Exception('Invalid source: %s - %s' % (package, url))
def prepare_third_party(self, build_directory, compiler_replace_maps):
utils.trymkdir(build_directory)
package = self.get_package_name()
version = self.get_version()
sources_all = self.get_sources_all()
exceptions = []
i = 0
for source_url in self.get_source():
if (source_url is None) or (len(source_url) <= 0) or (source_url == 'skip'):
logging.warning('[%s %s] Skipping preparation ...' % (package, version))
else:
logging.warning('[%s %s] trying prepare from %s ...' % (package, version, source_url))
try:
self._prepare_third_party(i, source_url, build_directory, compiler_replace_maps)
if not sources_all:
                        # sources_all = False ---> any single source is enough
                        # sources_all = True ----> all sources are required
break
except exceptions_fail_group + exceptions_fail_program:
raise
except:
exceptions.append(sys.exc_info())
i += 1
if len(exceptions) > 0:
i = 0
for exc_type, exc_value, exc_traceback in exceptions:
print "---- Exception #%d / %d ----------" % (i+1, len(exceptions))
traceback.print_exception(exc_type, exc_value, exc_traceback)
print "----------------------------------"
i += 1
raise FailPrepare(self)
def get_prefered_build_mode(self, prefered_build_mode_list):
build_modes = self.get_build_modes()
assert(len(prefered_build_mode_list) > 0)
prefered_build_mode = prefered_build_mode_list[0]
while (prefered_build_mode not in build_modes) and (len(prefered_build_mode_list)>0):
prefered_build_mode_list.pop(0)
if len(prefered_build_mode_list) > 0:
prefered_build_mode = prefered_build_mode_list[0]
return prefered_build_mode
def generate_cmake_condition(self, platforms, compiler_replace_maps):
target_uniques = Set()
condition = ''
i = 0
for plat in platforms:
for compiler_c, compiler_cpp, _, ext_sta, ext_dyn, _, _ in self.compiler_iterator(plat, compiler_replace_maps):
for package, platform_info in self.get_generator_targets(plat, compiler_c, compiler_cpp, ext_sta, ext_dyn):
package_lower = package.lower()
if (package_lower not in target_uniques) and (package_lower != 'dummy'):
target_uniques.add(package_lower)
if self.has_library(platform_info):
if i == 0:
condition += '(NOT TARGET %s)' % package_lower
else:
condition += ' OR (NOT TARGET %s)' % package_lower
i += 1
return condition
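# Illustrative note (hypothetical package names): for packages foo and bar the
# condition built above reads '(NOT TARGET foo) OR (NOT TARGET bar)', so the
# generated config is only processed while at least one target is still missing.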
def _search_library(self, rootdir, special_pattern):
'''
3 cases:
string
pattern as special string
list of strings
'''
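# Illustrative examples of the three accepted forms (hypothetical names):
#   'lib/libfoo.a'             -> checked as a literal path relative to rootdir
#   '/*foo*.so/'               -> text between slashes is passed to utils.rec_glob as a pattern
#   ['libfoo.a', '/*foo*.a/']  -> entries are tried in order until one matches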
logging.info('-- searching in {} with pattern: {}'.format(rootdir, special_pattern))
if special_pattern is None:
logging.warning('Failed searching lib in %s' % rootdir)
return (False, None)
package = self.get_package_name()
if isinstance(special_pattern, list):
utils.verbose(self.user_parameters, 'Searching list %s' % special_pattern)
valid_ff = None
for ff in special_pattern:
valid, valid_ff = self._search_library(rootdir, utils.get_norm_path(ff))
if valid:
break
return (valid, valid_ff)
elif special_pattern.startswith('/') and special_pattern.endswith('/'):
pattern = special_pattern[1:-1]
utils.verbose(self.user_parameters, 'Searching rootdir %s, pattern %s' % (rootdir, pattern))
files_found = utils.rec_glob(rootdir, pattern, deep_max=10)
utils.verbose(self.user_parameters, 'Candidates %s' % files_found)
if len(files_found) == 1:
relfile = os.path.relpath(files_found[0], rootdir)
return (True, utils.get_norm_path(relfile))
elif len(files_found) == 0:
msg = 'No library found in %s with pattern %s' % (rootdir, pattern)
logging.debug(msg)
return (False, None)
else:
msg = "Ambiguation in %s" % (package)
logging.debug(msg)
return (False, None)
else:
pathfull = os.path.join(rootdir, special_pattern)
utils.verbose(self.user_parameters, 'Checking file %s' % pathfull)
if os.path.exists(pathfull):
return (True, utils.get_norm_path(special_pattern))
else:
return (False, None)
def search_library(self, workbase, dataset, kind, rootdir=None):
'''
Can raise AmbiguationLibs or NotFoundInDataset.
'''
build_mode = self.get_prefered_build_mode(prefered[os.environ['MODE']])
if rootdir is None:
rootdir = workbase
utils.verbose(self.user_parameters, 'Searching rootdir %s' % (rootdir))
if (build_mode.lower() in dataset) and (kind in dataset[build_mode.lower()]):
special_pattern = dataset[build_mode.lower()][kind]
valid, valid_ff = self._search_library(rootdir, special_pattern)
if valid:
return valid_ff
else:
package = self.get_package_name()
raise AmbiguationLibs(kind, package, build_mode)
else:
raise NotFoundInDataset("Not found in dataset, searching %s - %s" % (build_mode.lower(), kind))
def search_library_noexcept(self, workbase, dataset, kind):
try:
try:
rootdir = os.path.abspath(workbase)
finalpath = self.search_library(workbase, dataset, kind, rootdir)
utils.superverbose(self.user_parameters, '[01] path: %s' % finalpath)
return finalpath
except AmbiguationLibs:
finalpath = '%s.%s' % (magic_invalid_file, kind)
utils.superverbose(self.user_parameters, '[02] path: %s' % finalpath)
return finalpath
except NotFoundInDataset:
finalpath = '%s.%s' % (magic_invalid_file, kind)
utils.superverbose(self.user_parameters, '[03] path: %s' % finalpath)
return finalpath
def check_parts_exists(self, workbase, package, target, dataset, kindlibs, build_modes=None):
'''
Ensures that every part of the target exists; returns True if all parts exist, False otherwise.
workbase: base installation directory
package: package name
target: target name
dataset: structure holding the search strategies, e.g.
{"debug": {"part1": ["*.dll", "*d.dll"]}, "release": {"part1": ["*_release.dll"]}}
kindlibs: tuple of parts to verify, each entry is (kind, mandatory)
build_modes: restrict the search to certain build modes
'''
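# Illustrative kindlibs value: [('dll', True), ('lib', False)] requires the dll for
# every build mode while treating the import lib as optional.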
all_ok = True
if build_modes is None:
build_modes = self.get_build_modes()
for build_mode in build_modes:
for kind, must in kindlibs:
try:
part_fullpath = os.path.join(workbase, self.search_library_noexcept(workbase, dataset, kind))
if not os.path.exists(part_fullpath):
if must:
logging.error("[%s] Don't found %s in %s. Mode: %s. Path: %s. Dataset: %s" % (package, kind, target, build_mode, part_fullpath, dataset))
all_ok = False
else:
msg = "[%s] Don't found %s in %s. Mode: %s. Path: %s" % (package, kind, target, build_mode, part_fullpath)
if build_mode != 'Release':
logging.warning(msg)
else:
logging.debug(msg)
except NotFoundInDataset as e:
if must:
logging.error("[ERROR] [NOT FOUND] [%s] %s" % (package, e))
all_ok = False
return all_ok
def is_invalid_lib(self, libpath):
return (libpath is None) or (utils.get_filename_no_ext(os.path.basename(libpath)) == magic_invalid_file)
def generate_cmakefiles(self, platforms, folder_output, compiler_replace_maps):
errors = 0
packing = self.is_packing()
if not packing:
logging.warning("package: %s don't need generate cmakefiles" % self.get_package_name())
return errors
oldcwd = os.getcwd()
utils.trymkdir(folder_output)
with utils.working_directory(folder_output):
superpackage = self.get_package_name()
superpackage_lower = superpackage.lower()
superpackage_upper = superpackage.upper()
build_modes = self.get_build_modes()
parameters = self.parameters
with open('%s-config.cmake' % superpackage_lower, 'wt') as f:
f.write('''CMAKE_POLICY(PUSH)
CMAKE_POLICY(VERSION 3.0)
cmake_minimum_required(VERSION 3.0)
cmake_policy(SET CMP0011 NEW)
''')
condition = self.generate_cmake_condition(platforms, compiler_replace_maps)
if(len(condition) > 0):
f.write('\nif(%s)\n' % condition)
f.write('''\ninclude(${CMAKI_PATH}/facts/facts.cmake)
cmaki_download_package()
file(TO_NATIVE_PATH "${_DIR}" %s_HOME)
file(TO_NATIVE_PATH "${_DIR}/${CMAKI_PLATFORM}" %s_PREFIX)
set(%s_HOME "${%s_HOME}" PARENT_SCOPE)
set(%s_PREFIX "${%s_PREFIX}" PARENT_SCOPE)
include(${_MY_DIR}/${CMAKI_PLATFORM}.cmake)
''' % (superpackage_upper, superpackage_upper, superpackage_upper, superpackage_upper, superpackage_upper, superpackage_upper))
if(len(condition) > 0):
f.write('\nendif()\n')
f.write('\nCMAKE_POLICY(POP)')
with open('%s-config-version.cmake' % superpackage_lower, 'wt') as f:
f.write('''\
cmake_minimum_required(VERSION 3.0)
cmake_policy(SET CMP0011 NEW)
include(${CMAKI_PATH}/facts/facts.cmake)
cmaki_package_version_check()
''')
for plat in platforms:
workspace = self.get_workspace(plat)
base_folder = self.get_base_folder()
for compiler_c, compiler_cpp, _, ext_sta, ext_dyn, env_modified, _ in self.compiler_iterator(plat, compiler_replace_maps):
with open('%s.cmake' % (plat), 'wt') as f:
install_3rdparty_dependencies = True
includes_set = []
definitions_set = []
system_depends_set = []
depends_set = Set()
for package, platform_info in self.get_generator_targets(plat, compiler_c, compiler_cpp, ext_sta, ext_dyn):
package_lower = package.lower()
package_upper = package.upper()
if self.has_library(platform_info) and (package != 'dummy'):
f.write('if(NOT TARGET %s)\n\n' % package_lower)
try:
add_3rdparty_dependencies = platform_info['add_3rdparty_dependencies']
except KeyError:
add_3rdparty_dependencies = True
try:
lib_provided = platform_info['lib_provided']
except KeyError:
lib_provided = True
if 'include' in platform_info:
include = platform_info['include']
for d in include:
includes_set.append(d)
# rename to definitions
if 'definitions' in platform_info:
definitions = platform_info['definitions']
for d in definitions:
definitions_set.append(d)
if 'system_depends' in platform_info:
system_depends = platform_info['system_depends']
for sd in system_depends:
system_depends_set.append(sd)
if ('targets_paths' in self.parameters):
targets_paths = self.parameters['targets_paths']
for key, value in targets_paths.iteritems():
f.write('file(TO_NATIVE_PATH "%s" %s)\n' % (value, key))
if ('executable' in platform_info) and (package != 'dummy'):
# a target in executable mode does not need to be installed
install_3rdparty_dependencies = False
if 'use_run_with_libs' in platform_info:
# if plat.startswith('win'):
if utils.is_windows():
f.write('file(TO_NATIVE_PATH "${_MY_DIR}/../../run_with_libs.cmd" %s_LAUNCHER)\n' % package_upper)
else:
f.write('file(TO_NATIVE_PATH "${_MY_DIR}/../../run_with_libs.sh" %s_LAUNCHER)\n' % package_upper)
executable = platform_info['executable']
workbase = os.path.join(oldcwd, workspace, base_folder, plat)
if not self.check_parts_exists(workbase, superpackage, package, executable, [('bin', True)], build_modes=['Release']):
errors += 1
release_bin = self.search_library_noexcept(workbase, executable, 'bin')
for suffix in ['', '_EXECUTABLE']:
if 'use_run_with_libs' in platform_info:
f.write('set(%s%s "${%s_LAUNCHER}" "${_DIR}/%s/%s" PARENT_SCOPE)\n' % (package_upper, suffix, package_upper, plat, utils.get_norm_path(release_bin, native=False)))
else:
f.write('set(%s%s "${_DIR}/%s/%s" PARENT_SCOPE)\n' % (package_upper, suffix, plat, utils.get_norm_path(release_bin, native=False)))
f.write('file(TO_NATIVE_PATH "${%s%s}" %s%s)\n' % (package_upper, suffix, package_upper, suffix))
f.write('\n')
if ('dynamic' in platform_info) and (package != 'dummy'):
dynamic = platform_info['dynamic']
# add depend
if add_3rdparty_dependencies:
f.write('list(APPEND %s_LIBRARIES %s)\n' % (superpackage_upper, package_lower))
# if plat.startswith('win'):
if utils.is_windows():
workbase = os.path.join(oldcwd, workspace, base_folder, plat)
if not self.check_parts_exists(workbase, superpackage, package, dynamic, [('dll', True), ('lib', lib_provided), ('pdb', False)]):
errors += 1
debug_dll = self.search_library_noexcept(workbase, dynamic, 'dll')
release_dll = self.search_library_noexcept(workbase, dynamic, 'dll')
relwithdebinfo_dll = self.search_library_noexcept(workbase, dynamic, 'dll')
minsizerel_dll = self.search_library_noexcept(workbase, dynamic, 'dll')
debug_lib = self.search_library_noexcept(workbase, dynamic, 'lib')
release_lib = self.search_library_noexcept(workbase, dynamic, 'lib')
relwithdebinfo_lib = self.search_library_noexcept(workbase, dynamic, 'lib')
minsizerel_lib = self.search_library_noexcept(workbase, dynamic, 'lib')
try:
relwithdebinfo_pdb = self.search_library(workbase, dynamic, 'pdb')
except Exception as e:
logging.debug('exception searching lib: %s' % e)
relwithdebinfo_pdb = None
try:
debug_pdb = self.search_library(workbase, dynamic, 'pdb')
except Exception as e:
logging.debug('exception searching lib: %s' % e)
debug_pdb = None
f.write('ADD_LIBRARY(%s SHARED IMPORTED)\n' % package_lower)
f.write('SET_PROPERTY(TARGET %s APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG RELEASE RELWITHDEBINFO MINSIZEREL)\n' % package_lower)
f.write('SET_TARGET_PROPERTIES(%s PROPERTIES\n' % package_lower)
# dll
f.write('\tIMPORTED_LOCATION_DEBUG "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(debug_dll, native=False)))
f.write('\tIMPORTED_LOCATION_RELEASE "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(release_dll, native=False)))
f.write('\tIMPORTED_LOCATION_RELWITHDEBINFO "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(relwithdebinfo_dll, native=False)))
f.write('\tIMPORTED_LOCATION_MINSIZEREL "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(minsizerel_dll, native=False)))
f.write('\n')
# lib
if not self.is_invalid_lib(debug_lib):
f.write('\tIMPORTED_IMPLIB_DEBUG "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(debug_lib, native=False)))
if not self.is_invalid_lib(release_lib):
f.write('\tIMPORTED_IMPLIB_RELEASE "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(release_lib, native=False)))
if not self.is_invalid_lib(relwithdebinfo_lib):
f.write('\tIMPORTED_IMPLIB_RELWITHDEBINFO "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(relwithdebinfo_lib, native=False)))
if not self.is_invalid_lib(minsizerel_lib):
f.write('\tIMPORTED_IMPLIB_MINSIZEREL "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(minsizerel_lib, native=False)))
f.write('\n')
# pdb
if not self.is_invalid_lib(debug_pdb):
f.write('\tIMPORTED_PDB_DEBUG "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(debug_pdb, native=False)))
if not self.is_invalid_lib(relwithdebinfo_pdb):
f.write('\tIMPORTED_PDB_RELWITHDEBINFO "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(relwithdebinfo_pdb, native=False)))
f.write(')\n')
else:
workbase = os.path.join(oldcwd, workspace, base_folder, plat)
if not self.check_parts_exists(workbase, superpackage, package, dynamic, [('so', True)]):
errors += 1
debug_so = self.search_library_noexcept(workbase, dynamic, 'so')
release_so = self.search_library_noexcept(workbase, dynamic, 'so')
relwithdebinfo_so = self.search_library_noexcept(workbase, dynamic, 'so')
minsizerel_so = self.search_library_noexcept(workbase, dynamic, 'so')
try:
debug_so_full = os.path.join(oldcwd, workbase, debug_so)
debug_soname = utils.get_soname(debug_so_full, env=env_modified)
except Exception as e:
logging.debug('exception searching lib: %s' % e)
debug_soname = None
try:
release_so_full = os.path.join(oldcwd, workbase, release_so)
release_soname = utils.get_soname(release_so_full, env=env_modified)
except Exception as e:
logging.debug('exception searching lib: %s' % e)
release_soname = None
try:
relwithdebinfo_so_full = os.path.join(oldcwd, workbase, relwithdebinfo_so)
relwithdebinfo_soname = utils.get_soname(relwithdebinfo_so_full, env=env_modified)
except Exception as e:
logging.debug('exception searching lib: %s' % e)
relwithdebinfo_soname = None
try:
minsizerel_so_full = os.path.join(oldcwd, workbase, minsizerel_so)
minsizerel_soname = utils.get_soname(minsizerel_so_full, env=env_modified)
except Exception as e:
logging.debug('exception searching lib: %s' % e)
minsizerel_soname = None
f.write('ADD_LIBRARY(%s SHARED IMPORTED)\n' % package_lower)
f.write('SET_PROPERTY(TARGET %s APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG RELEASE RELWITHDEBINFO MINSIZEREL)\n' % package_lower)
f.write('SET_TARGET_PROPERTIES(%s PROPERTIES\n' % package_lower)
# so
f.write('\tIMPORTED_LOCATION_DEBUG "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(debug_so, native=False)))
f.write('\tIMPORTED_LOCATION_RELEASE "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(release_so, native=False)))
f.write('\tIMPORTED_LOCATION_RELWITHDEBINFO "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(relwithdebinfo_so, native=False)))
f.write('\tIMPORTED_LOCATION_MINSIZEREL "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(minsizerel_so, native=False)))
f.write('\n')
# soname
if (debug_soname is not None) and os.path.exists( os.path.join(os.path.dirname(debug_so_full), debug_soname) ):
f.write('\tIMPORTED_SONAME_DEBUG "%s"\n' % utils.get_norm_path(debug_soname, native=False))
if (release_soname is not None) and os.path.exists( os.path.join(os.path.dirname(release_so_full), release_soname) ):
f.write('\tIMPORTED_SONAME_RELEASE "%s"\n' % utils.get_norm_path(release_soname, native=False))
if (relwithdebinfo_soname is not None) and os.path.exists( os.path.join(os.path.dirname(relwithdebinfo_so_full), relwithdebinfo_soname) ):
f.write('\tIMPORTED_SONAME_RELWITHDEBINFO "%s"\n' % utils.get_norm_path(relwithdebinfo_soname, native=False))
if (minsizerel_soname is not None) and os.path.exists( os.path.join(os.path.dirname(minsizerel_so_full), minsizerel_soname) ):
f.write('\tIMPORTED_SONAME_MINSIZEREL "%s"\n' % utils.get_norm_path(minsizerel_soname, native=False))
f.write(')\n')
if ('static' in platform_info) and (package != 'dummy'):
static = platform_info['static']
workbase = os.path.join(oldcwd, workspace, base_folder, plat)
if not self.check_parts_exists(workbase, superpackage, package, static, [('lib', True)]):
errors += 1
debug_lib = self.search_library_noexcept(workbase, static, 'lib')
release_lib = self.search_library_noexcept(workbase, static, 'lib')
relwithdebinfo_lib = self.search_library_noexcept(workbase, static, 'lib')
minsizerel_lib = self.search_library_noexcept(workbase, static, 'lib')
if add_3rdparty_dependencies:
# register target
f.write('list(APPEND %s_LIBRARIES %s)\n' % (superpackage_upper, package_lower))
f.write('ADD_LIBRARY(%s STATIC IMPORTED)\n' % package_lower)
f.write('SET_PROPERTY(TARGET %s APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG RELEASE RELWITHDEBINFO MINSIZEREL)\n' % package_lower)
f.write('SET_TARGET_PROPERTIES(%s PROPERTIES\n' % package_lower)
# lib
f.write('\tIMPORTED_LOCATION_DEBUG "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(debug_lib, native=False)))
f.write('\tIMPORTED_LOCATION_RELEASE "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(release_lib, native=False)))
f.write('\tIMPORTED_LOCATION_RELWITHDEBINFO "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(relwithdebinfo_lib, native=False)))
f.write('\tIMPORTED_LOCATION_MINSIZEREL "${_DIR}/%s/%s"\n' % (plat, utils.get_norm_path(minsizerel_lib, native=False)))
f.write(')\n')
if install_3rdparty_dependencies and (package != 'dummy'):
f.write('cmaki_install_3rdparty(%s)\n' % package_lower)
f.write('\n')
if self.has_library(platform_info) and (package != 'dummy'):
f.write('endif()\n\n')
# print includes
if len(includes_set) > 0:
# TODO: remove repeats
for d in list(set(includes_set)):
f.write('list(APPEND %s_INCLUDE_DIRS ${_DIR}/%s)\n' % (superpackage_upper, d))
f.write('\n')
if len(definitions_set) > 0:
# TODO: remove repeats
for d in list(set(definitions_set)):
f.write('add_definitions(%s)\n' % d)
f.write('\n')
if len(system_depends_set) > 0:
# TODO: remove repeats
f.write('# begin system depends\n')
for sd in list(set(system_depends_set)):
f.write('list(APPEND %s_LIBRARIES %s)\n' % (superpackage_upper, sd))
f.write('# end system depends\n')
if self.get_generate_find_package():
f.write('# Depends of %s (%s)\n' % (self.get_package_name(), self.get_version()))
for dep in self.get_depends_raw():
package_name = dep.get_package_name()
if package_name not in depends_set:
if dep.have_any_in_target(plat, 'dynamic', compiler_replace_maps):
f.write('cmaki_find_package(%s)\n' % (package_name))
else:
f.write('# cmaki_find_package(%s) # static package\n' % (package_name))
depends_set.add(package_name)
f.write('\n')
logging.info('----------------------------------------------------')
if self.user_parameters.fast:
logging.debug('skipping "generate_cmakefiles" because fast mode is enabled')
break
return errors
def show_environment_vars(self, env_modified):
package = self.get_package_name()
logging.debug('------- begin print environment variables for compile %s ---------' % package)
for key, value in sorted(env_modified.iteritems()):
logging.debug("%s=%s" % (key, value))
logging.debug('------- end print environment variables for compile %s -----------' % package)
def get_first_environment(self, compiler_replace_maps):
for plat in platforms:
for _, _, _, _, _, env_modified, _ in self.compiler_iterator(plat, compiler_replace_maps):
return env_modified
return os.environ.copy()
def safe_system(self, cmd, compiler_replace_maps):
return utils.safe_system(cmd, env=self.get_first_environment(compiler_replace_maps))
|
|
# unmask_jemalloc - De Mysteriis Dom jemalloc
#
# Copyright (c) 2014 Patroklos Argyroudis <argp at domain census-labs.com>
# Copyright (c) 2014 Chariton Karamitas <huku at domain census-labs.com>
# Copyright (c) 2014 Census, Inc. (http://www.census-labs.com/)
import os
import sys
import warnings
sys.path.append('.')
# import everything from gdbwrap in the current namespace so that the
# global `gdb' object is easily accessible
from gdbwrap import *
import jemalloc
import gdbutil
true = True
false = False
# globals
jeheap = jemalloc.jemalloc()
parsed = false
########## internal parsing stuff ##########
# parse jemalloc configuration options
def jeparse_options():
global jeheap
# thread magazine caches (disabled on firefox)
try:
opt_mag = gdb.parse_and_eval('opt_mag')
except RuntimeError:
opt_mag = 0
try:
opt_tcache = gdb.parse_and_eval('opt_tcache')
except RuntimeError:
opt_tcache = 0
try:
opt_lg_tcache_nslots = \
gdb.parse_and_eval('opt_lg_tcache_nslots')
except RuntimeError:
opt_lg_tcache_nslots = 0
if opt_mag != 0 or opt_tcache != 0 or opt_lg_tcache_nslots != 0:
jeheap.MAGAZINES = true
if jeheap.MAGAZINES == true:
try:
expr = 'sizeof(mag_rack_t) + (sizeof(bin_mags_t) * (%d - 1))' % (jeheap.nbins)
jeheap.magrack_size = \
gdbutil.to_int(gdb.parse_and_eval(expr))
except RuntimeError:
# standalone variant
jeheap.STANDALONE = true
expr = 'sizeof(tcache_t) + (sizeof(tcache_bin_t) * (%d - 1))' % (jeheap.nbins)
jeheap.magrack_size = \
gdbutil.to_int(gdb.parse_and_eval(expr))
# parse general jemalloc information
def jeparse_general():
global jeheap
try:
jeheap.narenas = gdbutil.to_int(gdb.parse_and_eval('narenas'))
except RuntimeError:
print('[unmask_jemalloc] error: symbol narenas not found')
sys.exit()
try:
jeheap.nbins = gdbutil.to_int(gdb.parse_and_eval('nbins'))
except RuntimeError:
# XXX: these are firefox specific, we must add support for more
# jemalloc variants in the future
if sys.platform == 'darwin':
jeheap.ntbins = gdbutil.to_int(gdb.parse_and_eval('ntbins'))
jeheap.nsbins = gdbutil.to_int(gdb.parse_and_eval('nsbins'))
jeheap.nqbins = gdbutil.to_int(gdb.parse_and_eval('nqbins'))
jeheap.nbins = jeheap.ntbins + jeheap.nsbins + jeheap.nqbins
else:
if jeheap.DWORD_SIZE == 4:
jeheap.nbins = 36
elif jeheap.DWORD_SIZE == 8:
jeheap.nbins = 35
# XXX: figure out how to calculate the chunk size correctly, this is
# firefox specific
jeheap.chunk_size = 1 << 20
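# (1 << 20 bytes == 1 MiB)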
# parse jemalloc arena information
def jeparse_arenas():
global jeheap
jeheap.arenas[:] = []
for i in range(0, jeheap.narenas):
current_arena = jemalloc.arena(0, i, [])
try:
current_arena.addr = \
gdbutil.to_int(gdb.parse_and_eval('arenas[%d]' % (i)))
except:
print('[unmask_jemalloc] error: cannot evaluate arenas[%d]' % (i))
sys.exit()
for j in range(0, jeheap.nbins):
nrg = 0
run_sz = 0
reg_size = 0
reg_offset = 0
end_addr = 0
try:
expr = 'arenas[%d].bins[%d].reg_size' % (i, j)
reg_size = \
gdbutil.to_int(gdb.parse_and_eval(expr))
expr = 'arenas[%d].bins[%d].reg0_offset' % (i, j)
reg_offset = \
gdbutil.to_int(gdb.parse_and_eval(expr))
except RuntimeError:
# XXX: for now assume it's a standalone variant; we
# need to do some error checking here too.
jeheap.STANDALONE = true
expr = 'arena_bin_info[%d].reg_size' % (j)
reg_size = \
gdbutil.to_int(gdb.parse_and_eval(expr))
expr = 'arena_bin_info[%d].nregs' % (j)
nrg = \
gdbutil.to_int(gdb.parse_and_eval(expr))
expr = 'arena_bin_info[%d].run_size' % (j)
run_sz = \
gdbutil.to_int(gdb.parse_and_eval(expr))
try:
expr = 'arenas[%d].bins[%d].runcur' % (i, j)
runcur_addr = runcur = \
gdbutil.to_int(gdb.parse_and_eval(expr))
end_addr = runcur_addr + run_sz
if runcur != 0:
current_run = \
jemalloc.arena_run(runcur, end_addr, run_sz, 0, \
int(reg_size), reg_offset, nrg, 0, [])
current_bin = jemalloc.arena_bin(0, j, current_run)
current_bin.addr = \
gdbutil.to_int(gdb.parse_and_eval('&arenas[%d].bins[%d]' % (i, j)))
current_arena.bins.append(current_bin)
else:
# no regions for this size class yet, therefore no runcur
current_run = jemalloc.arena_run()
current_bin = jemalloc.arena_bin(0, j, current_run)
current_arena.bins.append(current_bin)
except RuntimeError:
current_run = jemalloc.arena_run()
current_bin = jemalloc.arena_bin(0, j, current_run)
current_arena.bins.append(current_bin)
continue
# add arena to the list of arenas
jeheap.arenas.append(current_arena)
# parse the metadata of all runs and their regions
def jeparse_all_runs(proc):
global jeheap
# number of pages a chunk occupies
chunk_npages = jeheap.chunk_size >> 12
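# e.g. with a 1 MiB chunk and 4 KiB (1 << 12) pages this gives 256 pages per chunk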
# offset of bits in arena_chunk_map_t in double words
bitmap_offset = \
gdbutil.offsetof('arena_chunk_map_t', 'bits') / jeheap.DWORD_SIZE
# number of double words occupied by an arena_chunk_map_t
chunk_map_dwords = \
(bitmap_offset / jeheap.DWORD_SIZE) + 1
# prefix to use in gdb's examine command
if jeheap.DWORD_SIZE == 8:
dword_fmt = 'g'
else:
dword_fmt = 'w'
# the 12 least significant bits of each bitmap entry hold
# various flags for the corresponding run
flags_mask = (1 << 12) - 1
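# i.e. flags_mask == 0xfff; ~flags_mask keeps the page-aligned address bits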
# delete the heap's runs' array
jeheap.runs[:] = []
for chunk in jeheap.chunks:
if not chunk.arena:
continue
try:
# parse the whole map at once to avoid gdb delays
expr = 'x/%d%sx ((arena_chunk_t *)%#x)->map' % \
(chunk_npages * chunk_map_dwords, dword_fmt, chunk.addr)
lines = (gdb.execute(expr, to_string = true)).split('\n')
except:
print('[unmask_jemalloc] error: cannot read bitmap from chunk %#x' % (chunk.addr))
sys.exit()
dwords = []
i = 0
for line in lines:
dwords += [int(dw, 16) for dw in line[line.find(':') + 1:].split()]
bitmap = [dwords[i] for i in range(int(bitmap_offset), \
int(len(dwords)), int(bitmap_offset + 1))]
# traverse the bitmap, keeping the index of each map element (one per page)
for i, mapelm in enumerate(bitmap):
flags = mapelm & flags_mask
# flags == 1 means the run is small and the rest of the bits
# hold the actual run address
if flags == 1:
addr = mapelm & ~flags_mask
size = gdbutil.get_page_size()
# flags == 3 indicates a large run; calculate the run's address
# directly from the map element index and extract the run's size
elif flags == 3:
addr = chunk.addr + i * gdbutil.get_page_size()
size = mapelm & ~flags_mask
# run is not allocated? skip it
else:
continue
if addr not in [r.start for r in jeheap.runs]:
# XXX: we need to parse run headers here with a
# dedicated function
new_run = jemalloc.arena_run(addr, 0, size, 0, 0, 0, 0, 0, [])
jeheap.runs.append(new_run)
# parse metadata of current runs and their regions
def jeparse_runs(proc):
global jeheap
for i in range(0, len(jeheap.arenas)):
for j in range(0, len(jeheap.arenas[i].bins)):
try:
run_addr = jeheap.arenas[i].bins[j].run.start
bin_addr = \
gdbutil.buf_to_le(proc.read_memory(run_addr, jeheap.DWORD_SIZE))
jeheap.arenas[i].bins[j].run.bin = bin_addr
if jeheap.STANDALONE == false:
jeheap.arenas[i].bins[j].run.size = \
gdbutil.buf_to_le(proc.read_memory(bin_addr + \
(6 * jeheap.DWORD_SIZE), jeheap.DWORD_SIZE))
jeheap.arenas[i].bins[j].run.end = \
run_addr + jeheap.arenas[i].bins[j].run.size
jeheap.arenas[i].bins[j].run.total_regions = \
gdbutil.buf_to_le(proc.read_memory(bin_addr + \
(7 * jeheap.DWORD_SIZE), gdbutil.INT_SIZE))
except RuntimeError:
continue
# XXX: this isn't correct on jemalloc standalone *debug* variant
try:
jeheap.arenas[i].bins[j].run.free_regions = \
gdbutil.buf_to_le(proc.read_memory(run_addr + \
jeheap.DWORD_SIZE + gdbutil.INT_SIZE, gdbutil.INT_SIZE))
except RuntimeError:
jeheap.arenas[i].bins[j].run.free_regions = 0
continue
if jeheap.arenas[i].bins[j].run.free_regions < 0:
jeheap.arenas[i].bins[j].run.free_regions = 0
# delete the run's regions
jeheap.arenas[i].bins[j].run.regions[:] = []
# the run's regions
reg0_offset = jeheap.arenas[i].bins[j].run.reg0_offset
first_region_addr = reg0_addr = run_addr + reg0_offset
regs_mask_bits = \
(jeheap.arenas[i].bins[j].run.total_regions / 8) + 1
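# the regions bitmask has one bit per region, so roughly total_regions / 8 bytes
# (plus one for the remainder) are examined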
regs_mask_str = \
gdb.execute('x/%dbt arenas[%d].bins[%d].runcur.regs_mask' % \
(regs_mask_bits, i, j), to_string = true)
regs_mask = ''
for line in regs_mask_str.splitlines():
line = line[line.find(':') + 1 : line.find('\n')]
line = line.replace('\n', '')
line = line.replace('\t', '')
line = line.replace(' ', '')
regs_mask += line
jeheap.arenas[i].bins[j].run.regs_mask = regs_mask
first_region = jemalloc.region(0, first_region_addr, \
int(jeheap.arenas[i].bins[j].run.regs_mask[0]))
addr = first_region.addr
try:
first_region.content_preview = \
hex(gdbutil.buf_to_le(proc.read_memory(addr, \
gdbutil.INT_SIZE))).rstrip('L')
except RuntimeError:
continue
jeheap.arenas[i].bins[j].run.regions.append(first_region)
for k in range(1, jeheap.arenas[i].bins[j].run.total_regions):
try:
current_region = jemalloc.region(k, 0, \
int(jeheap.arenas[i].bins[j].run.regs_mask[k]))
except:
current_region = jemalloc.region(k, 0, 0)
addr = current_region.addr = \
reg0_addr + (k * jeheap.arenas[i].bins[j].run.region_size)
try:
current_region.content_preview = \
hex(gdbutil.buf_to_le(proc.read_memory(addr, \
gdbutil.INT_SIZE))).rstrip('L')
except:
continue
jeheap.arenas[i].bins[j].run.regions.append(current_region)
# parse all jemalloc chunks
def jeparse_chunks():
global jeheap
# delete the chunks' list
jeheap.chunks[:] = []
try:
root = gdbutil.to_int(gdb.parse_and_eval('chunk_rtree.root'))
height = gdbutil.to_int(gdb.parse_and_eval('chunk_rtree.height'))
level2bits = []
for i in range(0, height):
expr = 'chunk_rtree.level2bits[%d]' % (i)
level2bits.append(gdbutil.to_int(gdb.parse_and_eval(expr)))
except:
print('[unmask_jemalloc] error: cannot parse chunk radix tree')
sys.exit()
# check if we're running on x86_64
if jeheap.DWORD_SIZE == 8:
dw_fmt = 'g'
else:
dw_fmt = 'w'
# parse the radix tree using a stack
stack = [(root, 0)]
while len(stack):
(node, node_height) = stack.pop()
child_cnt = 1 << level2bits[node_height]
dump = gdb.execute('x/%d%sx %#x' % (child_cnt, dw_fmt, node), to_string = true)
for line in dump.split('\n'):
line = line[line.find(':') + 1:]
for address in line.split():
address = int(address, 16)
if address != 0:
# leaf nodes hold pointers to actual values
if node_height == height - 1:
expr = '((arena_chunk_t *)%#x)->arena' % address
arena = gdbutil.to_int(gdb.parse_and_eval(expr))
exists = false
if arena in [i.addr for i in jeheap.arenas]:
exists = true
if exists:
jeheap.chunks.append(jemalloc.arena_chunk(address, arena))
else:
jeheap.chunks.append(jemalloc.arena_chunk(address))
# non-leaf nodes are inserted in the stack
else:
stack.append((address, node_height + 1))
# our old workhorse, now broken in pieces
def jeparse(proc):
global jeheap
global parsed
parsed = false
print('[unmask_jemalloc] parsing structures from memory...')
jeparse_options()
jeparse_general()
jeparse_arenas()
jeparse_runs(proc)
jeparse_chunks()
jeparse_all_runs(proc)
parsed = true
print('[unmask_jemalloc] structures parsed')
########## exported gdb commands ##########
class jemalloc_help(gdb.Command):
'''Details about the commands provided by unmask_jemalloc'''
def __init__(self):
gdb.Command.__init__(self, 'jehelp', gdb.COMMAND_OBSCURE)
def invoke(self, arg, from_tty):
print('[unmask_jemalloc] De Mysteriis Dom jemalloc')
print('[unmask_jemalloc] %s\n' % (jemalloc.VERSION))
print('[unmask_jemalloc] available commands:')
print('[unmask_jemalloc] jechunks : dump info on all available chunks')
print('[unmask_jemalloc] jearenas : dump info on jemalloc arenas')
print('[unmask_jemalloc] jeruns [-c] : dump info on jemalloc runs (-c for current runs only)')
print('[unmask_jemalloc] jebins : dump info on jemalloc bins')
print('[unmask_jemalloc] jeregions <size class> : dump all current regions of the given size class')
print('[unmask_jemalloc] jesearch [-c] <hex> : search the heap for the given hex value (-c for current runs only)')
print('[unmask_jemalloc] jedump [filename] : dump all available info to screen (default) or file')
print('[unmask_jemalloc] jeparse : (re)parse jemalloc structures from memory')
print('[unmask_jemalloc] jeversion : output version number')
print('[unmask_jemalloc] jehelp : this help message')
class jemalloc_version(gdb.Command):
'''Output version number'''
def __init__(self):
gdb.Command.__init__(self, 'jeversion', gdb.COMMAND_OBSCURE)
def invoke(self, arg, from_tty):
print('[unmask_jemalloc] %s' % (jemalloc.VERSION))
class jemalloc_parse(gdb.Command):
'''Parse jemalloc structures from memory'''
def __init__(self):
gdb.Command.__init__(self, 'jeparse', gdb.COMMAND_OBSCURE)
self.proc = gdb.inferiors()[0]
def invoke(self, arg, from_tty):
jeparse(self.proc)
class jemalloc_dump(gdb.Command):
'''Dump all available jemalloc info to screen (default) or to file'''
def __init__(self):
gdb.Command.__init__(self, 'jedump', gdb.COMMAND_OBSCURE)
self.proc = gdb.inferiors()[0]
def invoke(self, arg, from_tty):
global jeheap
if arg == '':
print('[unmask_jemalloc] dumping all jemalloc info to screen')
else:
print('[unmask_jemalloc] dumping all jemalloc info to file %s' % (arg))
if os.path.exists(arg):
print('[unmask_jemalloc] error: file %s already exists' % (arg))
return
try:
sys.stdout = open(arg, 'w')
except:
print('[unmask_jemalloc] error opening file %s for writing' % (arg))
if parsed == false:
jeparse(self.proc)
# general jemalloc info
print(jeheap)
print('')
# info on chunks
for chunk in jeheap.chunks:
print(chunk)
print('')
# info on arenas
for i in range(0, len(jeheap.arenas)):
print(jeheap.arenas[i])
print('')
# info on current runs and bins
for j in range(0, len(jeheap.arenas[i].bins)):
print(jeheap.arenas[i].bins[j].run)
print(jeheap.arenas[i].bins[j])
# info on current regions
for k in range(0, len(jeheap.arenas[i].bins[j].run.regions)):
print('[unmask_jemalloc] [region %03d] [%#x]' % \
(k, jeheap.arenas[i].bins[j].run.regions[k].addr))
print('')
# reset stdout
if arg != '':
sys.stdout = sys.__stdout__
class jemalloc_chunks(gdb.Command):
'''Dump info on all available chunks'''
def __init__(self):
gdb.Command.__init__(self, 'jechunks', gdb.COMMAND_OBSCURE)
self.proc = gdb.inferiors()[0]
def invoke(self, arg, from_tty):
global jeheap
if parsed == false:
jeparse(self.proc)
for chunk in jeheap.chunks:
print(chunk)
class jemalloc_arenas(gdb.Command):
'''Dump info on jemalloc arenas'''
def __init__(self):
gdb.Command.__init__(self, 'jearenas', gdb.COMMAND_OBSCURE)
self.proc = gdb.inferiors()[0]
def invoke(self, arg, from_tty):
global jeheap
if parsed == false:
jeparse(self.proc)
print(jeheap)
class jemalloc_runs(gdb.Command):
'''Dump info on jemalloc runs'''
def __init__(self):
gdb.Command.__init__(self, 'jeruns', gdb.COMMAND_OBSCURE)
self.proc = gdb.inferiors()[0]
def invoke(self, arg, from_tty):
global jeheap
if parsed == false:
jeparse(self.proc)
arg = arg.split()
if len(arg) >= 1 and arg[0] == '-c':
current_runs = true
else:
current_runs = false
if current_runs == true:
print('[unmask_jemalloc] listing current runs only')
for i in range(0, len(jeheap.arenas)):
print(jeheap.arenas[i])
for j in range(0, len(jeheap.arenas[i].bins)):
print(jeheap.arenas[i].bins[j].run)
else:
print('[unmask_jemalloc] listing all allocated runs')
total_runs = len(jeheap.runs)
print('[unmask_jemalloc] [total runs %d]' % (total_runs))
for i in range(0, total_runs):
print('[unmask_jemalloc] [run %#x] [size %07d]' % \
(jeheap.runs[i].start, jeheap.runs[i].size))
class jemalloc_bins(gdb.Command):
'''Dump info on jemalloc bins'''
def __init__(self):
gdb.Command.__init__(self, 'jebins', gdb.COMMAND_OBSCURE)
self.proc = gdb.inferiors()[0]
def invoke(self, arg, from_tty):
global jeheap
if parsed == false:
jeparse(self.proc)
for i in range(0, len(jeheap.arenas)):
print(jeheap.arenas[i])
for j in range(0, len(jeheap.arenas[i].bins)):
print(jeheap.arenas[i].bins[j])
class jemalloc_regions(gdb.Command):
'''Dump all current regions of the given size class'''
def __init__(self):
gdb.Command.__init__(self, 'jeregions', gdb.COMMAND_OBSCURE)
self.proc = gdb.inferiors()[0]
def invoke(self, arg, from_tty):
global jeheap
if arg == '':
print('[unmask_jemalloc] usage: jeregions <size class>')
print('[unmask_jemalloc] for example: jeregions 1024')
return
if parsed == false:
jeparse(self.proc)
size_class = int(arg)
print('[unmask_jemalloc] dumping all regions of size class %d' % (size_class))
found = false
for i in range(0, len(jeheap.arenas)):
for j in range(0, len(jeheap.arenas[i].bins)):
if jeheap.arenas[i].bins[j].run.region_size == size_class:
found = true
print(jeheap.arenas[i].bins[j].run)
# the bitmask of small-sized runs is too big to display
# print '[unmask_jemalloc] [regs_mask %s]' % (jeheap.arenas[i].bins[j].run.regs_mask)
for k in range(0, len(jeheap.arenas[i].bins[j].run.regions)):
print(jeheap.arenas[i].bins[j].run.regions[k])
if found == false:
print('[unmask_jemalloc] no regions found for size class %d' % (size_class))
class jemalloc_search(gdb.Command):
'''Search the jemalloc heap for the given hex value'''
def __init__(self):
gdb.Command.__init__(self, 'jesearch', gdb.COMMAND_OBSCURE)
self.proc = gdb.inferiors()[0]
def invoke(self, arg, from_tty):
global jeheap
if arg == '':
print('[unmask_jemalloc] usage: jesearch [-c] <hex value>')
print('[unmask_jemalloc] Use -c to search current runs only')
print('[unmask_jemalloc] for example: jesearch 0x41424344')
return
arg = arg.split()
if len(arg) >= 2 and arg[0] == '-c':
current_runs = true
search_for = arg[1]
else:
current_runs = false
search_for = arg[0]
if parsed == false:
jeparse(self.proc)
results = []
found = false
if current_runs == true:
print('[unmask_jemalloc] searching all current runs for %s' % (search_for))
for i in range(0, len(jeheap.arenas)):
for j in range(0, len(jeheap.arenas[i].bins)):
try:
out_str = gdb.execute('find %#x, %#x, %s' % \
(jeheap.arenas[i].bins[j].run.start, \
jeheap.arenas[i].bins[j].run.end, \
search_for), \
to_string = true)
except:
continue
str_results = out_str.split('\n')
for str_result in str_results:
if str_result.startswith('0x'):
found = true
results.append((str_result, jeheap.arenas[i].bins[j].run.start))
else:
print('[unmask_jemalloc] searching all chunks for %s' % (search_for))
for chunk in jeheap.chunks:
try:
out_str = gdb.execute('find %#x, %#x, %s' % \
(chunk.addr, chunk.addr + jeheap.chunk_size, search_for), \
to_string = true)
except:
continue
str_results = out_str.split('\n')
for str_result in str_results:
if str_result.startswith('0x'):
found = true
results.append((str_result, chunk.addr))
if found == false:
print('[unmask_jemalloc] value %s not found' % (search_for))
return
for (what, where) in results:
if current_runs == true:
print('[unmask_jemalloc] found %s at %s (run %#x)' % \
(search_for, what, where))
else:
print('[unmask_jemalloc] found %s at %s (chunk %#x)' % \
(search_for, what, where))
# instantiate the command classes so they register themselves with gdb
jemalloc_parse()
jemalloc_dump()
jemalloc_chunks()
jemalloc_arenas()
jemalloc_runs()
jemalloc_bins()
jemalloc_regions()
jemalloc_search()
jemalloc_help()
jemalloc_version()
# EOF
|
|
#!/usr/bin/env pythonw
# pylint: disable=W0612,C0111,C0103,W0201,E402
print("-I- Importing Pmag GUI dependencies")
from pmag_env import set_env
import matplotlib
matplotlib.use('WXAgg')
import sys
import wx
import wx.lib.buttons as buttons
import wx.lib.newevent as newevent
import os
import webbrowser
from pmagpy import pmag
from pmagpy import ipmag
from pmagpy import contribution_builder as cb
from dialogs import pmag_gui_dialogs
from dialogs import pmag_er_magic_dialogs
from dialogs import pmag_gui_menu3 as pmag_gui_menu
from dialogs import ErMagicBuilder
from dialogs import pmag_widgets as pw
import pmagpy.find_pmag_dir as find_pmag_dir
PMAGPY_DIRECTORY = find_pmag_dir.get_pmag_dir()
from programs import demag_gui
from programs import thellier_gui
class MagMainFrame(wx.Frame):
""""""
try:
version = pmag.get_version()
except:
version = ""
title = "Pmag GUI version: %s"%version
def __init__(self, WD=None, dmodel=None):
"""
Input working directory, and data model object (optional).
"""
wx.Frame.__init__(self, None, wx.ID_ANY, self.title, name='pmag_gui mainframe')
#set icon
self.icon = wx.Icon()
icon_path = os.path.join(PMAGPY_DIRECTORY, 'programs', 'images', 'PmagPy.ico')
if os.path.isfile(icon_path):
self.icon.CopyFromBitmap(wx.Bitmap(icon_path, wx.BITMAP_TYPE_ANY))
self.SetIcon(self.icon)
else:
print("-I- PmagPy icon file not found -- skipping")
self.data_model = dmodel
self.FIRST_RUN = True
self.panel = wx.Panel(self, name='pmag_gui main panel')
self.InitUI()
# if not specified on the command line,
# make the user choose their working directory
if WD:
self.WD = WD
else:
self.get_dir()
self.get_wd_data()
# use realpath
self.WD = os.path.realpath(self.WD)
# set data model and read in data
self.dir_path.SetValue(self.WD)
# for use as module:
self.resource_dir = os.getcwd()
# set some things
self.HtmlIsOpen = False
self.Bind(wx.EVT_CLOSE, self.on_menu_exit)
# if specified directory doesn't exist, try to make it
try:
if not os.path.exists(self.WD):
os.mkdir(self.WD)
pw.simple_warning("New directory: {}\nwill be created".format(self.WD))
except FileNotFoundError:
pw.simple_warning("You have provided a directory that does not exist and cannot be created.\n Please pick a different directory.")
print("-W- You have provided a directory that does not exist and cannot be created.\n Please pick a different directory.")
# do menubar
menubar = pmag_gui_menu.MagICMenu(self)
self.SetMenuBar(menubar)
self.menubar = menubar
def get_wd_data(self):
"""
Read in any available data from the current
working directory and create a Contribution object.
Called by self.get_dm_and_wd
"""
wait = wx.BusyInfo('Reading in data from current working directory, please wait...')
#wx.Yield()
print('-I- Read in any available data from working directory')
self.contribution = cb.Contribution(self.WD, dmodel=self.data_model)
del wait
def InitUI(self):
"""
Build the mainframe
"""
menubar = pmag_gui_menu.MagICMenu(self)
self.SetMenuBar(menubar)
#pnl = self.panel
#---sizer logo ----
#start_image = wx.Image("/Users/ronshaar/PmagPy/images/logo2.png")
#start_image = wx.Image("/Users/Python/simple_examples/001.png")
#start_image.Rescale(start_image.GetWidth(), start_image.GetHeight())
#image = wx.BitmapFromImage(start_image)
#self.logo = wx.StaticBitmap(self.panel, -1, image)
#---sizer 0 ----
bSizer0 = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "Choose MagIC project directory"), wx.HORIZONTAL)
self.dir_path = wx.TextCtrl(self.panel, id=-1, size=(600,25), style=wx.TE_READONLY)
self.change_dir_button = buttons.GenButton(self.panel, id=-1, label="change directory",size=(-1, -1))
self.change_dir_button.SetBackgroundColour("#F8F8FF")
self.change_dir_button.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_change_dir_button, self.change_dir_button)
bSizer0.Add(self.change_dir_button, wx.ALIGN_LEFT)
bSizer0.AddSpacer(40)
bSizer0.Add(self.dir_path,wx.ALIGN_CENTER_VERTICAL)
# not fully implemented method for saving/reverting WD
# last saved: []
#bSizer0_1 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "Save MagIC project directory in current state or revert to last-saved state" ), wx.HORIZONTAL )
#saved_label = wx.StaticText(self.panel, -1, "Last saved:", (20, 120))
#self.last_saved_time = wx.TextCtrl(self.panel, id=-1, size=(100,25), style=wx.TE_READONLY)
#now = datetime.datetime.now()
#now_string = "{}:{}:{}".format(now.hour, now.minute, now.second)
#self.last_saved_time.write(now_string)
#self.save_dir_button = buttons.GenButton(self.panel, id=-1, label = "save dir", size=(-1, -1))
#self.revert_dir_button = buttons.GenButton(self.panel, id=-1, label = "revert dir", size=(-1, -1))
#self.Bind(wx.EVT_BUTTON, self.on_revert_dir_button, self.revert_dir_button)
#self.Bind(wx.EVT_BUTTON, self.on_save_dir_button, self.save_dir_button)
#bSizer0_1.Add(saved_label, flag=wx.RIGHT, border=10)
#bSizer0_1.Add(self.last_saved_time, flag=wx.RIGHT, border=10)
#bSizer0_1.Add(self.save_dir_button,flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
#bSizer0_1.Add(self.revert_dir_button,wx.ALIGN_LEFT)
#
#---sizer 1 ----
bSizer1 = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "Import data to working directory"), wx.HORIZONTAL)
text = "1. Convert magnetometer files to MagIC format"
self.btn1 = buttons.GenButton(self.panel, id=-1, label=text,
size=(450, 50), name='step 1')
self.btn1.SetBackgroundColour("#FDC68A")
self.btn1.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_convert_file, self.btn1)
text = "2. (optional) Calculate geographic/tilt-corrected directions"
self.btn2 = buttons.GenButton(self.panel, id=-1, label=text, size=(450, 50), name='step 2')
self.btn2.SetBackgroundColour("#FDC68A")
self.btn2.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_btn_orientation, self.btn2)
text = "3. (optional) Add MagIC metadata for uploading data to MagIC "
self.btn3 = buttons.GenButton(self.panel, id=-1, label=text, size=(450, 50), name='step 3')
self.btn3.SetBackgroundColour("#FDC68A")
self.btn3.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_btn_metadata, self.btn3)
text = "Download or unpack MagIC text file"
self.btn4 = buttons.GenButton(self.panel, id=-1, label=text, size=(330, 50))
self.btn4.SetBackgroundColour("#FDC68A")
self.btn4.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_btn_unpack, self.btn4)
text = "Convert directory to 3.0. format (legacy data only)"
self.btn1a = buttons.GenButton(self.panel, id=-1, label=text,
size=(330, 50), name='step 1a')
self.btn1a.SetBackgroundColour("#FDC68A")
self.btn1a.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_btn_convert_3, self.btn1a)
#str = "OR"
OR = wx.StaticText(self.panel, -1, "or", (20, 120))
font = wx.Font(18, wx.SWISS, wx.NORMAL, wx.NORMAL)
OR.SetFont(font)
#bSizer0.Add(self.panel,self.btn1,wx.ALIGN_TOP)
bSizer1_1 = wx.BoxSizer(wx.VERTICAL)
bSizer1_1.AddSpacer(20)
bSizer1_1.Add(self.btn1, wx.ALIGN_TOP)
bSizer1_1.AddSpacer(20)
bSizer1_1.Add(self.btn2, wx.ALIGN_TOP)
bSizer1_1.AddSpacer(20)
bSizer1_1.Add(self.btn3, wx.ALIGN_TOP)
bSizer1_1.AddSpacer(20)
bSizer1.Add(bSizer1_1, wx.ALIGN_CENTER, wx.EXPAND)
bSizer1.AddSpacer(20)
bSizer1.Add(OR, 0, wx.ALIGN_CENTER, 0)
bSizer1.AddSpacer(20)
bSizer1_2 = wx.BoxSizer(wx.VERTICAL)
spacing = 60
bSizer1_2.AddSpacer(spacing)
bSizer1_2.Add(self.btn4, 0, wx.ALIGN_CENTER, 0)
bSizer1_2.AddSpacer(20)
bSizer1_2.Add(self.btn1a, 0, wx.ALIGN_CENTER, 0)
bSizer1_2.AddSpacer(20)
bSizer1.Add(bSizer1_2)
bSizer1.AddSpacer(20)
#---sizer 2 ----
bSizer2 = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "Analysis and plots" ), wx.HORIZONTAL)
text = "Demag GUI"
self.btn_demag_gui = buttons.GenButton(self.panel, id=-1, label=text, size=(300, 50), name='demag gui')
self.btn_demag_gui.SetBackgroundColour("#6ECFF6")
self.btn_demag_gui.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_btn_demag_gui, self.btn_demag_gui)
text = "Thellier GUI"
self.btn_thellier_gui = buttons.GenButton(self.panel, id=-1, label=text, size=(300, 50), name='thellier gui')
self.btn_thellier_gui.SetBackgroundColour("#6ECFF6")
self.btn_thellier_gui.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_btn_thellier_gui, self.btn_thellier_gui)
bSizer2.AddSpacer(20)
bSizer2.Add(self.btn_demag_gui, 0, wx.ALIGN_CENTER, 0)
bSizer2.AddSpacer(20)
bSizer2.Add(self.btn_thellier_gui, 0, wx.ALIGN_CENTER, 0)
bSizer2.AddSpacer(20)
#---sizer 3 ----
bSizer3 = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "Create file for upload to MagIC database"), wx.HORIZONTAL)
text = "Create MagIC txt file for upload"
self.btn_upload = buttons.GenButton(self.panel, id=-1, label=text, size=(300, 50))
self.btn_upload.SetBackgroundColour("#C4DF9B")
self.btn_upload.InitColours()
bSizer3.AddSpacer(20)
bSizer3.Add(self.btn_upload, 0, wx.ALIGN_CENTER, 0)
bSizer3.AddSpacer(20)
self.Bind(wx.EVT_BUTTON, self.on_btn_upload, self.btn_upload)
#---arrange sizers ----
hbox = wx.BoxSizer(wx.HORIZONTAL)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(5)
#vbox.Add(self.logo,0,wx.ALIGN_CENTER,0)
vbox.AddSpacer(5)
vbox.Add(bSizer0, 0, wx.ALIGN_CENTER, 0)
vbox.AddSpacer(10)
#vbox.Add(bSizer0_1, 0, wx.ALIGN_CENTER, 0)
#vbox.AddSpacer(10)
vbox.Add(bSizer1, 0, wx.ALIGN_CENTER, 0)
vbox.AddSpacer(10)
vbox.Add(bSizer2, 0, wx.ALIGN_CENTER, 0)
vbox.AddSpacer(10)
vbox.Add(bSizer3, 0, wx.ALIGN_CENTER, 0)
vbox.AddSpacer(10)
hbox.AddSpacer(10)
hbox.Add(vbox, 0, wx.ALIGN_CENTER, 0)
hbox.AddSpacer(5)
self.panel.SetSizer(hbox)
hbox.Fit(self)
#----------------------------------------------------------------------
def get_dir(self):
"""
Open a dialog to choose a working directory.
Called by self.get_dm_and_wd.
"""
if "-WD" in sys.argv and self.FIRST_RUN:
ind = sys.argv.index('-WD')
self.WD = os.path.abspath(sys.argv[ind+1])
os.chdir(self.WD)
self.WD = os.getcwd()
self.dir_path.SetValue(self.WD)
else:
self.on_change_dir_button(None)
#self.WD = os.getcwd()
self.FIRST_RUN = False
# this functionality is not fully working yet, so I've removed it for now
#try:
# print "trying listdir"
# os.listdir(self.WD)
#except Exception as ex:
# print ex
#print "self.WD.split('/')", self.WD.split('/')
#if len(self.WD.split('/')) <= 4:
# print "no to saving this directory"
#else:
# print "do on_save_dir_button"
# self.on_save_dir_button(None)
#----------------------------------------------------------------------
#def getFolderBitmap():
# img = folder_icon.GetImage().Rescale(50, 50)
# return img.ConvertToBitmap()
def on_change_dir_button(self, event, show=True):
currentDirectory = os.getcwd()
self.change_dir_dialog = wx.DirDialog(self.panel, "Choose your working directory to create or edit a MagIC contribution:", defaultPath=currentDirectory, style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
if show:
self.on_finish_change_dir(self.change_dir_dialog)
def on_finish_change_dir(self, dialog, show=True):
if not show:
self.WD = dialog.GetPath()
os.chdir(self.WD)
self.dir_path.SetValue(self.WD)
elif dialog.ShowModal() == wx.ID_OK:
self.WD = dialog.GetPath()
os.chdir(self.WD)
self.dir_path.SetValue(self.WD)
dialog.Destroy()
self.get_wd_data()
else:
dialog.Destroy()
# def on_revert_dir_button(self, event):
# if self.last_saved_time.GetLineText(0) == "not saved":
# dia = wx.MessageDialog(self.panel, "You can't revert, because your working directory has not been saved. Are you sure you're in the right directory?", "Can't be done", wx.OK)
# dia.ShowModal()
# return
# dia = wx.MessageDialog(self.panel, "Are you sure you want to revert to the last saved state? All changes since {} will be lost".format(self.last_saved_time.GetLineText(0)), "Not so fast", wx.YES_NO|wx.NO_DEFAULT)
# ok = dia.ShowModal()
# if ok == wx.ID_YES:
# os.chdir('..')
# wd = self.WD
# shutil.rmtree(wd)
# shutil.move(self.saved_dir, self.WD)
# os.chdir(self.WD)
# self.on_save_dir_button(None)
# else:
# print "-I Don't revert"
# def on_save_dir_button(self, event):
# try:
# if len(self.WD.split('/')) <= 4:
# self.last_saved_time.Clear()
# self.last_saved_time.write("not saved")
# return
# os.chdir('..')
# wd = self.WD
# wd = wd.rstrip('/')
# ind = wd.rfind('/') + 1
# saved_prefix, saved_folder = wd[:ind], wd[ind:]
# self.saved_dir = saved_prefix + "copy_" + saved_folder
# if "copy_" + saved_folder in os.listdir(saved_prefix):
# shutil.rmtree(self.saved_dir)
# shutil.copytree(self.WD, self.saved_dir)
# self.last_saved_time.Clear()
# now = datetime.datetime.now()
# now_string = "{}:{}:{}".format(now.hour, now.minute, now.second)
# self.last_saved_time.write(now_string)
# os.chdir(self.WD)
# except:# OSError:
# print "-I Problem copying working directory"
# self.last_saved_time.Clear()
# self.last_saved_time.write("not saved")
def on_btn_thellier_gui(self, event):
"""
Open Thellier GUI
"""
if not self.check_for_meas_file():
return
if not self.check_for_uncombined_files():
return
outstring = "thellier_gui.py -WD %s"%self.WD
print("-I- running python script:\n %s"%(outstring))
# disable and hide Pmag GUI mainframe
self.Disable()
self.Hide()
# show busyinfo
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.SafeYield()
# create custom Thellier GUI closing event and bind it
ThellierGuiExitEvent, EVT_THELLIER_GUI_EXIT = newevent.NewCommandEvent()
self.Bind(EVT_THELLIER_GUI_EXIT, self.on_analysis_gui_exit)
# make and show the Thellier GUI frame
thellier_gui_frame = thellier_gui.Arai_GUI(self.WD, self,
standalone=False,
DM=3,
evt_quit=ThellierGuiExitEvent)
if not thellier_gui_frame: print("Thellier GUI failed to start, aborting"); del wait; return
thellier_gui_frame.Centre()
thellier_gui_frame.Show()
del wait
def on_btn_demag_gui(self, event):
"""
Open Demag GUI
"""
if not self.check_for_meas_file():
return
if not self.check_for_uncombined_files():
return
outstring = "demag_gui.py -WD %s"%self.WD
print("-I- running python script:\n %s"%(outstring))
# disable and hide Pmag GUI mainframe
self.Disable()
self.Hide()
# show busyinfo
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.SafeYield()
# create custom Demag GUI closing event and bind it
DemagGuiExitEvent, EVT_DEMAG_GUI_EXIT = newevent.NewCommandEvent()
self.Bind(EVT_DEMAG_GUI_EXIT, self.on_analysis_gui_exit)
# make and show the Demag GUI frame
demag_gui_frame = demag_gui.Demag_GUI(self.WD, self,
write_to_log_file=False,
data_model=3,
evt_quit=DemagGuiExitEvent)
demag_gui_frame.Centre()
demag_gui_frame.Show()
del wait
def on_analysis_gui_exit(self, event):
"""
When Thellier or Demag GUI closes,
show and enable Pmag GUI main frame.
Read in an updated contribution object
based on any changed files.
"""
self.Enable()
self.Show()
# also, refresh contribution object based on files
# that may have been written/overwritten by Thellier GUI
self.get_wd_data()
def on_convert_file(self, event):
pmag_dialogs_dia = pmag_gui_dialogs.import_magnetometer_data(self, wx.ID_ANY, '', self.WD)
pmag_dialogs_dia.Show()
pmag_dialogs_dia.Center()
# self.Hide()
def on_btn_convert_3(self, event):
"""
Open dialog for rough conversion of
2.5 files to 3.0 files.
Offer link to earthref for proper upgrade.
"""
dia = pw.UpgradeDialog(None)
dia.Center()
res = dia.ShowModal()
if res == wx.ID_CANCEL:
webbrowser.open("https://www2.earthref.org/MagIC/upgrade", new=2)
return
## more nicely styled way, but doesn't link to earthref
#msg = "This tool is meant for relatively simple upgrades (for instance, a measurement file, a sample file, and a criteria file).\nIf you have a more complex contribution to upgrade, and you want maximum accuracy, use the upgrade tool at https://www2.earthref.org/MagIC/upgrade.\n\nDo you want to continue?"
#result = pw.warning_with_override(msg)
#if result == wx.ID_NO:
#webbrowser.open("https://www2.earthref.org/MagIC/upgrade", new=2)
#return
# turn files from 2.5 --> 3.0 (rough translation)
meas, upgraded, no_upgrade = pmag.convert_directory_2_to_3('magic_measurements.txt',
input_dir=self.WD, output_dir=self.WD,
data_model=self.contribution.data_model)
if not meas:
wx.MessageBox('2.5 --> 3.0 failed. Do you have a magic_measurements.txt file in your working directory?',
'Info', wx.OK | wx.ICON_INFORMATION)
return
# create a contribution
self.contribution = cb.Contribution(self.WD)
# make skeleton files with specimen, sample, site, location data
self.contribution.propagate_measurement_info()
# pop up
upgraded_string = ", ".join(upgraded)
if no_upgrade:
no_upgrade_string = ", ".join(no_upgrade)
msg = '2.5 --> 3.0 translation completed!\n\nThese 3.0 format files were created: {}.\n\nHowever, these 2.5 format files could not be upgraded: {}.\n\nTo convert all 2.5 files, use the MagIC upgrade tool: https://www2.earthref.org/MagIC/upgrade\n'.format(upgraded_string, no_upgrade_string)
if 'criteria.txt' in upgraded:
msg += '\nNote: Please check your criteria file for completeness and accuracy, as not all 2.5 files will be fully upgraded.'
if 'pmag_criteria.txt' in no_upgrade:
msg += '\nNote: Not all criteria files can be upgraded, even on the MagIC site. You may need to recreate an old pmag_criteria file from scratch in Thellier GUI or Demag GUI.'
wx.MessageBox(msg, 'Warning', wx.OK | wx.ICON_INFORMATION)
else:
msg = '2.5 --> 3.0 translation completed!\nThese files were converted: {}'.format(upgraded_string)
wx.MessageBox(msg, 'Info', wx.OK | wx.ICON_INFORMATION)
def on_btn_metadata(self, event):
"""
Initiate the series of windows to add metadata
to the contribution.
"""
# make sure we have a measurements file
if not self.check_for_meas_file():
return
# make sure all files of the same type have been combined
if not self.check_for_uncombined_files():
return
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.SafeYield()
self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder3(self.WD, self, self.contribution)
#
self.ErMagic_frame.Show()
self.ErMagic_frame.Center()
# gets total available screen space - 30%
size = wx.DisplaySize()
size = (size[0] - 0.3 * size[0], size[1] - 0.3 * size[1])
self.ErMagic_frame.Raise()
del wait
def init_check_window(self):
"""
initiates the object that will control steps 1-6
of checking headers, filling in cell values, etc.
"""
self.check_dia = pmag_er_magic_dialogs.ErMagicCheckFrame3(self, 'Check Data',
self.WD, self.contribution)
def on_btn_orientation(self, event):
"""
Create and fill wxPython grid for entering
orientation data.
"""
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.SafeYield()
#dw, dh = wx.DisplaySize()
size = wx.DisplaySize()
size = (size[0]-0.1 * size[0], size[1]-0.1 * size[1])
frame = pmag_gui_dialogs.OrientFrameGrid3(self, -1, 'demag_orient.txt',
self.WD, self.contribution,
size)
frame.Show(True)
frame.Centre()
self.Hide()
del wait
def on_btn_unpack(self, event):
"""
Create dialog to choose a file to unpack
with download magic.
Then run download_magic and create self.contribution.
"""
def magic_download_dia(warn=""):
dia = pw.TextDialog(self, "Download from MagIC\nusing contribution id or DOI", "MagIC id/DOI", warn)
res = dia.ShowModal()
magic_id = dia.text_ctrl.return_value()
if res == wx.ID_CANCEL:
return wx.ID_CANCEL
if res == wx.ID_OK:
return magic_id
else:
return False
dlg = pw.ChooseOne(self, "Download from MagIC",
"Unpack previous downloaded file",
text="You can unpack a downloaded file from MagIC, or download a file from MagIC directly using the contribution id or DOI.", title="")
dlg.Centre()
res = dlg.ShowModal()
# try to download directly from MagIC
if res == wx.ID_YES:
magic_id = True
warning = ""
while magic_id:
magic_id = magic_download_dia(warning)
# if magic id was blank
if magic_id == "":
warning = "You must provide a MagIC contribution id or DOI"
magic_id = True
continue
# if user canceled the download
if magic_id == wx.ID_CANCEL:
return
# if everything looks good, try to download
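# (illustrative) a short value such as "16676" is treated as a contribution id,
# while a longer string such as "10.7288/V4/MAGIC/16676" is treated as a DOI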
if len(str(magic_id)) < 8: # use contribution id
status, stuff = ipmag.download_magic_from_id(magic_id)
f = "magic_contribution_{}.txt".format(magic_id)
else: # use DOI
status, stuff = ipmag.download_magic_from_doi(magic_id)
f = "magic_contribution.txt"
if not status:
warning = stuff
if status:
break
if not os.path.exists(os.path.join(self.WD, f)):
os.rename(os.path.join(os.getcwd(), f), os.path.join(self.WD, f))
input_dir = self.WD
# try to unpack a previously downloaded file
if res == wx.ID_NO:
dlg = wx.FileDialog(
None, message = "choose txt file to unpack",
defaultDir=self.WD,
defaultFile="",
style=wx.FD_OPEN #| wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
FILE = dlg.GetPath()
input_dir, f = os.path.split(FILE)
else:
return False
outstring="download_magic.py -f {} -WD {} -ID {}".format(f, self.WD, input_dir)
# run as module:
print("-I- running python script:\n %s"%(outstring))
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
ex = None
try:
if ipmag.download_magic(f, self.WD, input_dir, overwrite=True, data_model=self.data_model):
text = "Successfully ran download_magic.py program.\nMagIC files were saved in your working directory.\nSee Terminal/message window for details."
else:
text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
return
except Exception as ex:
text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
del wait
dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
if ex:
raise(ex)
return
self.contribution = cb.Contribution(self.WD)
# make a success pop-up
dlg = wx.MessageDialog(self, caption="Success", message="You can now add orientation information or metadata, or open one of the analysis tools", style=wx.OK)
dlg.ShowModal()
def on_btn_upload(self, event):
"""
Try to run upload_magic.
Open validation mode if the upload file has problems.
"""
if not self.check_for_uncombined_files():
return
outstring="upload_magic.py"
print("-I- running python script:\n %s"%(outstring))
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
success_responses = ['200', 200, '201', 201, True]
if 'measurements' in self.contribution.tables:
self.contribution.tables['measurements'].add_measurement_names()
upload_file, val_response, dummy1, dummy2 = ipmag.upload_magic(concat=False, input_dir_path=self.WD, dir_path=self.WD)
del wait
if val_response == "no 3.0 files found, upload file not created":
pw.simple_warning("No 3.0 files were found in your directory, so no upload could be created!")
return
status = val_response['status']
if not status:
pw.simple_warning("Oops, something went wrong with validating on the server.\n{}\nTry again later or submit a bug report.".format(val_response['warnings']))
return
validation_errors = val_response['validation']
if (not validation_errors['warnings']) and (not validation_errors['errors']):
text = "You are ready to upload!\n{} was generated in {}".format(os.path.split(upload_file)[1], self.WD)
dlg = pw.ChooseOne(self, "Go to MagIC for uploading", "Not ready yet", text, "Saved")
dlg.Centre()
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
if result == wx.ID_YES:
pw.on_database_upload(None)
return
# there were problems, so display validation
text = "There were some problems with the creation of your upload file.\nSee Terminal/message window for details"
dlg = wx.MessageDialog(self, caption="Error", message=text, style=wx.OK)
dlg.Centre()
result = dlg.ShowModal()
# TODO: get the error-y business formatted into a dict of lists of dicts
from programs import magic_gui
self.Disable()
self.Hide()
self.magic_gui_frame = magic_gui.MainFrame(self.WD,
dmodel=self.data_model,
title="Validations",
contribution=self.contribution,
errors=validation_errors['errors'])
self.magic_gui_frame.Centre()
self.magic_gui_frame.Show()
self.magic_gui_frame.highlight_problems()
# bind that button to quitting magic gui and re-enabling Pmag GUI
self.magic_gui_frame.Bind(wx.EVT_BUTTON, self.on_end_validation, self.magic_gui_frame.return_btn)
# do binding so that closing/quitting re-opens the main frame
self.magic_gui_frame.Bind(wx.EVT_CLOSE, self.on_end_validation)
# this makes it work with only the validation window open
self.magic_gui_frame.Bind(wx.EVT_MENU,
lambda event: self.menubar.on_quit(event, self.magic_gui_frame),
self.magic_gui_frame.menubar.file_quit)
# this makes it work if an additional grid is open
self.Bind(wx.EVT_MENU,
lambda event: self.menubar.on_quit(event, self.magic_gui_frame),
self.magic_gui_frame.menubar.file_quit)
def on_end_validation(self, event):
"""
Switch back from validation mode to main Pmag GUI mode.
Hide validation frame and show main frame.
"""
self.Enable()
self.Show()
self.magic_gui_frame.Destroy()
def on_menu_exit(self, event):
"""
Exit the GUI
"""
# also delete appropriate copy file
try:
self.help_window.Destroy()
except:
pass
if '-i' in sys.argv:
self.Destroy()
try:
sys.exit() # can raise TypeError if wx inspector was used
except Exception as ex:
if isinstance(ex, TypeError):
pass
else:
raise ex
def check_for_uncombined_files(self):
"""
Go through working directory and check for uncombined files.
(I.e., location1_specimens.txt and location2_specimens.txt but no specimens.txt.)
Show a warning if uncombined files are found.
Return True if no uncombined files are found OR user elects
to continue anyway.
"""
wd_files = os.listdir(self.WD)
ftypes = ['specimens.txt', 'samples.txt', 'sites.txt', 'locations.txt']
uncombined = set()
for ftype in ftypes:
if ftype not in wd_files:
for f in wd_files:
if f.endswith('_' + ftype):
uncombined.add(ftype)
if uncombined:
msg = 'It looks like you may have uncombined files of type(s) {} in your working directory.\nYou may want to go back to Step 1 and finish combining all files.\nIf you continue, the program will try to extract as much information as possible from your measurement file.'.format(", ".join(list(uncombined)))
dlg = pw.ChooseOne(self, 'Continue anyway', 'Go back', msg, title="Warning!")
res = dlg.ShowModal()
if res == wx.ID_NO:
return
return True
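    # Illustrative sketch only (file names are hypothetical): with a working
    # directory containing ['location1_specimens.txt', 'location2_specimens.txt',
    # 'samples.txt', 'measurements.txt'], the loop above flags 'specimens.txt'
    # as uncombined, because per-location specimen files exist but the combined
    # specimens.txt does not, while 'samples.txt' passes since its combined
    # file is present.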
def check_for_meas_file(self):
"""
Check the working directory for a measurement file.
If not found, show a warning and return False.
Otherwise return True.
"""
meas_file_name = "measurements.txt"
dm = "3.0"
if not os.path.isfile(os.path.join(self.WD, meas_file_name)):
pw.simple_warning("Your working directory must have a {} format {} file to run this step. Make sure you have fully completed step 1 (import magnetometer file) and ALSO converted to 3.0., if necessary), then try again.\n\nIf you are trying to look at data downloaded from MagIC, you must unpack the txt file first. Some contributions do not contain measurement data, in which case you won't be able to use this function.".format(dm, meas_file_name))
return False
return True
def main():
if '-h' in sys.argv:
help_msg = """
Runs Pmag GUI for uploading, downloading, analyzing and visualizing
data.
SYNTAX
pmag_gui.py [command line options]
# or, for Anaconda users:
pmag_gui_anaconda [command line options]
OPTIONS
-WD DIR: working directory, default current directory
EXAMPLE
pmag_gui.py -WD projects/my_project
INFORMATION
See https://earthref.org/PmagPy/cookbook/#pmag_gui.py for a complete tutorial
"""
print(help_msg)
sys.exit()
print('-I- Starting Pmag GUI - please be patient')
# if redirect is true, wxpython makes its own output window for stdout/stderr
if 'darwin' in sys.platform and (not set_env.IS_FROZEN):
app = wx.App(redirect=False)
else:
app = wx.App(redirect=True)
dir_path = pmag.get_named_arg("-WD", None)
app.frame = MagMainFrame(WD=dir_path)
app.frame.Show()
app.frame.Center()
## use for debugging:
#if '-i' in sys.argv:
# import wx.lib.inspection
# wx.lib.inspection.InspectionTool().Show()
app.MainLoop()
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import argparse
import collections
import json
import logging
import random
import requests
import subprocess
import yaml
import time
logging.basicConfig(format="%(message)s")
POD = collections.namedtuple('Pod', ['name', 'namespace', 'ip', 'labels'])
"""
XDS fetches routing information from pilot.
The information is in XDS V1 format.
"""
PILOT_SVC = "istio-pilot"
ISTIO_NS = "istio-system"
CLUSTER = "istio-proxy"
ENVOY_PORT = 15000
LOCAL_PORT_START = 50000
LOCAL_PORT_STOP = 60000
class XDS(object):
def __init__(self, url, ns=ISTIO_NS, cluster=CLUSTER, headers=None):
self.url = url
self.ns = ns
self.cluster = cluster
self.headers = headers
self.cds_info = {}
self.sds_info = {}
def key(self, pod):
role = "sidecar"
if "ingress" in pod.name:
role = "ingress"
elif "egress" in pod.name:
role = "egress"
return "{role}~{pod.ip}~{pod.name}.{pod.namespace}~{pod.namespace}.svc.cluster.local".format(
role=role, pod=pod)
def query(self, path, post=False):
url = self.url + path
logging.info(url)
try:
if post:
return requests.post(url, headers=self.headers)
else:
return requests.get(url, headers=self.headers).json()
except Exception as ex:
logging.error(ex)
logging.error("Is pilot accessible at %s?" % url)
sys.exit(-1)
def lds(self, pod, hydrate=False):
data = self.query("/v1/listeners/{cluster}/{key}".format(
cluster=self.cluster, key=self.key(pod)))
if not hydrate:
return data
# call rds
for l in data['listeners']:
for f in l['filters']:
if 'config' not in f:
continue
if 'rds' not in f['config']:
continue
if 'route_config_name' not in f['config']['rds']:
continue
rn = f['config']['rds']['route_config_name']
# found a route fetch it
f['config']['route_config'] = self.rds(pod, rn, hydrate)
f['config']['route_config']['name'] = rn
data["clusters"] = self.cds_info
return data
def rds(self, pod, route="80", hydrate=False):
data = self.query("/v1/routes/{route}/{cluster}/{key}".format(
route=route, cluster=self.cluster, key=self.key(pod)))
if not hydrate:
return data
# check if we should hydrate cds
for vh in data['virtual_hosts']:
for route in vh['routes']:
if 'cluster' in route:
cn = route['cluster']
route['cluster'] = self.cds(pod, cn, hydrate)
elif 'weighted_clusters' in route:
for cls in route['weighted_clusters']['clusters']:
cn = cls['name']
cls['cluster'] = self.cds(pod, cn, hydrate)
return data
def cds(self, pod, cn, hydrate=False):
pk = self.key(pod)
if pk not in self.cds_info:
data = self.query("/v1/clusters/{cluster}/{key}".format(
cluster=self.cluster, key=self.key(pod)))
self.cds_info[pk] = {c['name']: c for c in data['clusters']}
if hydrate:
for sn, cl in self.cds_info[pk].items():
if cl['type'] != "sds":
continue
cl['endpoints'] = self.sds(cl['service_name'])
return self.cds_info[pk][cn]
def sds(self, service_key):
if service_key not in self.sds_info:
self.sds_info[service_key] = self.query(
"/v1/registration/{service_key}".format(service_key=service_key))
return self.sds_info[service_key]
def cache_stats(self):
return self.query("/cache_stats")
def clear_cache_stats(self):
return self.query("/cache_stats_delete", post=True)
# Class XDS end
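# Illustrative sketch (not wired into the tool's flow): how XDS might be used
# once a pilot URL is reachable, e.g. through a manual port-forward. The pod
# values below are hypothetical placeholders.
def _example_xds_dump():
    pod = POD(name="productpage-v1-12345", namespace="default",
              ip="10.0.0.12", labels={"app": "productpage"})
    xds = XDS(url="http://localhost:8080")
    # hydrate=True chases the RDS/CDS/SDS references so the returned listener
    # data is self-contained
    return xds.lds(pod, hydrate=True)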
# Proxy class
"""
Proxy uses envoy admin port to fetch routing information.
Proxy provides data in XDS V2 format.
"""
class Proxy(object):
def __init__(self, envoy_url):
self.envoy_url = envoy_url
def query(self, path, use_json=True):
url = self.envoy_url + path
logging.info(url)
try:
s = requests.post(url)
except Exception as ex:
logging.error(ex)
logging.error("Is envoy accessible at %s?" % url)
sys.exit(-1)
if use_json:
return s.json()
else:
return s.text
def routes(self):
return self.query("/routes")
def clusters(self):
return self.query("/clusters", use_json=False)
def listeners(self):
return self.query("/listeners")
def pod_info():
op = subprocess.check_output(
"kubectl get pod --all-namespaces -o json".split())
o = json.loads(op)
return {i['metadata']['name']:
POD(i['metadata']['name'], i['metadata']['namespace'],
i['status']['podIP'], i['metadata']['labels']) for i in o['items']}
def searchpod(pi, searchstr):
podname = podns = podip = ""
if "." in searchstr:
si = searchstr.split('.')
if len(si) != 3 and len(si) != 2:
logging.warning(
"podname must be either name.namespace.podip or name.namespace or any string that's a pod's label or a prefix of a pod's name")
return None
podname = si[0]
podns = si[1]
if len(si) == 3:
podip = si[2]
pods = []
for pn, pod in pi.items():
if podname and podns:
if podip:
if podname == pod.name and podns == pod.namespace and podip == pod.ip:
pods.append(pod)
return pods
elif podname == pod.name and podns == pod.namespace:
pods.append(pod)
return pods
elif searchstr in pod.labels.values():
pods.append(pod)
elif pn.startswith(searchstr):
pods.append(pod)
return pods
def start_port_forward(pod_name, namespace, remote_port):
local_port = random.randrange(LOCAL_PORT_START, LOCAL_PORT_STOP)
port_forward_pid = ""
url = ""
try:
port_forward_pid = subprocess.Popen("kubectl --namespace={namespace} port-forward {pod_name} {local_port}:{remote_port}".format(
pod_name=pod_name, namespace=namespace, local_port=local_port, remote_port=remote_port).split(), stdout=open(os.devnull, "wb")).pid
except:
logging.error("Failed to create port-forward for pod %s.%s with remote port %s" %
(pod_name, namespace, remote_port))
raise
else:
url = "http://localhost:{port}".format(port=local_port)
# wait until the port-forward process is fully up
while True:
try:
requests.get(url)
except:
time.sleep(.1)
else:
break
return url, port_forward_pid
def find_pilot_url():
try:
pilot_svc = subprocess.check_output(
"kubectl get svc {svc} -n {ns} -o json".format(svc=PILOT_SVC, ns=ISTIO_NS).split())
except:
pilot_svc = {}
pilot_url = ""
pilot_port = ""
port_forward_pid = ""
if pilot_svc:
pilot_spec = json.loads(pilot_svc)['spec']
        discovery_port = ""
legacy_discovery_port = ""
for port in pilot_spec['ports']:
if port['name'] == 'http-legacy-discovery':
legacy_discovery_port = port['port']
elif port['name'] == 'http-discovery':
discovery_port = port['port']
if legacy_discovery_port:
pilot_port = legacy_discovery_port
else:
pilot_port = discovery_port
pilot_url = "http://{ip}:{port}".format(
ip=pilot_spec['clusterIP'], port=pilot_port)
try:
requests.get(pilot_url, timeout=2)
except:
logging.warning(
"It seems that you are running outside the k8s cluster")
logging.warning(
"Let's try to create a port-forward to access pilot")
cmd = "kubectl --namespace=%s get -l istio=pilot pod -o=jsonpath={.items[0].metadata.name}" % (
ISTIO_NS)
            pod_name = subprocess.check_output(cmd.split()).decode().strip()
pilot_url, port_forward_pid = start_port_forward(
pod_name, ISTIO_NS, pilot_port)
return pilot_url, port_forward_pid
def main(args):
pods = searchpod(pod_info(), args.podname)
if not pods:
logging.error("Cound not find pod %s" % args.podname)
return -1
if len(pods) > 1:
podnames = ["%s.%s" % (pod.name, pod.namespace) for pod in pods]
logging.error("More than one pod is found: %s" % ", ".join(podnames))
return -1
pod = pods[0]
if args.output is None:
output_dir = "/tmp/" + pod.name
else:
output_dir = args.output + "/" + pod.name
try:
os.makedirs(output_dir)
except OSError:
if not os.path.isdir(output_dir):
raise
if not args.skip_pilot:
pilot_url = args.pilot_url
pilot_port_forward_pid = ""
if pilot_url:
if not pilot_url.startswith("http://") and not pilot_url.startswith("https://"):
pilot_url = "http://" + pilot_url
else:
pilot_url, pilot_port_forward_pid = find_pilot_url()
output_file = output_dir + "/" + "pilot_xds.yaml"
op = open(output_file, "wt")
logging.info("Fetching from Pilot for pod %s in %s namespace" %
(pod.name, pod.namespace))
xds = XDS(url=pilot_url)
data = xds.lds(pod, True)
yaml.safe_dump(data, op, default_flow_style=False,
allow_unicode=False, indent=2)
print "Wrote ", output_file
if args.cache_stats:
output_file = output_dir + "/" + "stats_xds.yaml"
op = open(output_file, "wt")
data = xds.cache_stats()
logging.info("Fetching Pilot cache stats")
yaml.safe_dump(data, op, default_flow_style=False,
allow_unicode=False, indent=2)
print "Wrote ", output_file
if args.show_ssl_summary:
for l in data["listeners"]:
state = "SSL" if "ssl_context" in l else "PLAINTEXT"
logging.info(
"Listener {0:30s} : {1:10s}".format(l["name"], state))
if args.clear_cache_stats:
xds.clear_cache_stats()
if pilot_port_forward_pid:
subprocess.call(["kill", "%s" % pilot_port_forward_pid])
if not args.skip_envoy:
envoy_url, envoy_port_forward_pid = start_port_forward(
pod.name, pod.namespace, ENVOY_PORT)
logging.info("Fetching from Envoy for pod %s in %s namespace" %
(pod.name, pod.namespace))
pr = Proxy(envoy_url)
output_file = output_dir + "/" + "proxy_routes.yaml"
op = open(output_file, "wt")
data = pr.routes()
yaml.safe_dump(data, op, default_flow_style=False,
allow_unicode=False, indent=2)
print "Wrote ", output_file
output_file = output_dir + "/" + "proxy_listeners.yaml"
op = open(output_file, "wt")
data = pr.listeners()
yaml.safe_dump(data, op, default_flow_style=False,
allow_unicode=False, indent=2)
print "Wrote ", output_file
output_file = output_dir + "/" + "proxy_clusters.yaml"
op = open(output_file, "wt")
data = pr.clusters()
op.write(data)
print "Wrote ", output_file
if envoy_port_forward_pid:
subprocess.call(["kill", "%s" % envoy_port_forward_pid])
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Fetch routes from Envoy or Pilot for a given pod")
parser.add_argument("--pilot_url",
help="Often this is localhost:8080 or 15005 (https) or 15007 (http) through a port-forward."
" \n\nkubectl --namespace=istio-system port-forward $(kubectl --namespace=istio-system get -l istio=pilot pod -o=jsonpath='{.items[0].metadata.name}') 8080:8080."
"\n\nIf not provided, attempt will be made to find it out."
)
parser.add_argument("podname", help="podname must be either name.namespace.podip or name.namespace or any string that is a pod's label or a prefix of a pod's name. ingress, mixer, citadel, product-page all work")
parser.add_argument(
"--output", help="A directory where output files are saved. default is the /tmp directory")
    parser.add_argument(
        "--skip_envoy", action='store_true', help="Skip fetching Envoy configuration from the pod")
    parser.add_argument(
        "--skip_pilot", action='store_true', help="Skip fetching proxy configuration from Pilot")
parser.add_argument(
"--show_ssl_summary",
action="store_true",
help="If set, show summary for ssl context for listeners that have it")
parser.add_argument(
"--cache_stats", action='store_true', help="Fetch Pilot cache stats")
parser.add_argument(
"--clear_cache_stats", action='store_true', help="Clear Pilot cache stats")
args = parser.parse_args()
sys.exit(main(args))
|
|
# Generated by Django 2.1 on 2018-08-31 13:17
import django.db.models.deletion
from django.conf import settings
from django.contrib.auth.models import Permission
from django.db import migrations, models
import openslides
def create_comment_sections_from_config_and_move_comments_to_own_model(
apps, schema_editor
):
ConfigStore = apps.get_model("core", "ConfigStore")
Motion = apps.get_model("motions", "Motion")
MotionComment = apps.get_model("motions", "MotionComment")
MotionCommentSection = apps.get_model("motions", "MotionCommentSection")
Group = apps.get_model(settings.AUTH_GROUP_MODEL)
# try to get old motions_comments config variable, where all comment fields are saved
try:
motions_comments = ConfigStore.objects.get(key="motions_comments")
except ConfigStore.DoesNotExist:
return
comments_sections = motions_comments.value
# Delete config value
motions_comments.delete()
# Get can_see_comments and can_manage_comments permissions and the associated groups
can_see_comments = Permission.objects.filter(codename="can_see_comments")
if len(can_see_comments) == 1:
# Save groups. list() is necessary to evaluate the database query right now.
can_see_groups = list(can_see_comments.get().group_set.all())
else:
can_see_groups = Group.objects.all()
can_manage_comments = Permission.objects.filter(codename="can_manage_comments")
if len(can_manage_comments) == 1:
# Save groups. list() is necessary to evaluate the database query right now.
can_manage_groups = list(can_manage_comments.get().group_set.all())
else:
can_manage_groups = Group.objects.all()
# Create comment sections. Map them to the old ids, so we can find the right section
# when creating actual comments
old_id_mapping = {}
# Keep track of the special comment sections "forState" and "forRecommendation". If a
# comment is found, the comment value will be assigned to new motion fields and not comments.
forStateId = None
forRecommendationId = None
for id, section in comments_sections.items():
if section is None:
continue
if section.get("forState", False):
forStateId = id
elif section.get("forRecommendation", False):
forRecommendationId = id
else:
comment_section = MotionCommentSection(name=section["name"])
comment_section.save(skip_autoupdate=True)
comment_section.read_groups.add(*[group.id for group in can_see_groups])
comment_section.write_groups.add(*[group.id for group in can_manage_groups])
old_id_mapping[id] = comment_section
# Create all comments objects
comments = []
for motion in Motion.objects.all():
if not isinstance(motion.comments, dict):
continue
for section_id, comment_value in motion.comments.items():
# Skip empty sections.
comment_value = comment_value.strip()
if comment_value == "":
continue
# Special comments will be moved to separate fields.
if section_id == forStateId:
motion.state_extension = comment_value
motion.save(skip_autoupdate=True)
elif section_id == forRecommendationId:
motion.recommendation_extension = comment_value
motion.save(skip_autoupdate=True)
else:
comment = MotionComment(
comment=comment_value,
motion=motion,
section=old_id_mapping[section_id],
)
comments.append(comment)
MotionComment.objects.bulk_create(comments)
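# For illustration only (shape inferred from the loop above, values hypothetical):
# the old "motions_comments" config value mapped a comment-field id to a dict
# describing that field, e.g.
#     {"1": {"name": "Internal note"},
#      "2": {"name": "State", "forState": True},
#      "3": {"name": "Recommendation", "forRecommendation": True}}
# Regular entries become MotionCommentSection rows; the two special entries are
# moved into the new state_extension / recommendation_extension motion fields.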
class Migration(migrations.Migration):
dependencies = [("users", "0006_user_email"), ("motions", "0011_motion_version")]
operations = [
# Cleanup from last migration. Somehow cannot be done there.
migrations.AlterField( # remove default=''
model_name="motion", name="text", field=models.TextField()
),
migrations.AlterField( # remove default=''
model_name="motion", name="title", field=models.CharField(max_length=255)
),
migrations.AlterField( # remove null=True
model_name="motionchangerecommendation",
name="motion",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="change_recommendations",
to="motions.Motion",
),
),
# Add extension fields for former "special comments". No hack anymore..
migrations.AddField(
model_name="motion",
name="recommendation_extension",
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name="motion",
name="state_extension",
field=models.TextField(blank=True, null=True),
),
migrations.AlterModelOptions(
name="motion",
options={
"default_permissions": (),
"ordering": ("identifier",),
"permissions": (
("can_see", "Can see motions"),
("can_create", "Can create motions"),
("can_support", "Can support motions"),
("can_manage", "Can manage motions"),
),
"verbose_name": "Motion",
},
),
# Comments and CommentsSection models
migrations.CreateModel(
name="MotionComment",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("comment", models.TextField()),
],
options={"default_permissions": ()},
bases=(
openslides.utils.models.RESTModelMixin, # type: ignore
models.Model,
),
),
migrations.CreateModel(
name="MotionCommentSection",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
(
"read_groups",
models.ManyToManyField(
blank=True,
related_name="read_comments",
to=settings.AUTH_GROUP_MODEL,
),
),
(
"write_groups",
models.ManyToManyField(
blank=True,
related_name="write_comments",
to=settings.AUTH_GROUP_MODEL,
),
),
],
options={"default_permissions": ()},
bases=(
openslides.utils.models.RESTModelMixin, # type: ignore
models.Model,
),
),
migrations.AddField(
model_name="motioncomment",
name="section",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="comments",
to="motions.MotionCommentSection",
),
),
migrations.AddField(
model_name="motioncomment",
name="motion",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="motions.Motion"
),
),
migrations.AlterUniqueTogether(
name="motioncomment", unique_together={("motion", "section")}
),
# Move the comments and sections
migrations.RunPython(
create_comment_sections_from_config_and_move_comments_to_own_model
),
# Remove old comment field from motion, use the new model instead
migrations.RemoveField(model_name="motion", name="comments"),
migrations.AlterField(
model_name="motioncomment",
name="motion",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comments",
to="motions.Motion",
),
),
]
|
|
import numpy as np
from skimage.feature.util import (FeatureDetector, DescriptorExtractor,
_mask_border_keypoints,
_prepare_grayscale_input_2D)
from skimage.feature import (corner_fast, corner_orientations, corner_peaks,
corner_harris)
from skimage.transform import pyramid_gaussian
from .orb_cy import _orb_loop
OFAST_MASK = np.zeros((31, 31))
OFAST_UMAX = [15, 15, 15, 15, 14, 14, 14, 13, 13, 12, 11, 10, 9, 8, 6, 3]
for i in range(-15, 16):
for j in range(-OFAST_UMAX[abs(i)], OFAST_UMAX[abs(i)] + 1):
OFAST_MASK[15 + j, 15 + i] = 1
class ORB(FeatureDetector, DescriptorExtractor):
"""Oriented FAST and rotated BRIEF feature detector and binary descriptor
extractor.
Parameters
----------
n_keypoints : int, optional
Number of keypoints to be returned. The function will return the best
`n_keypoints` according to the Harris corner response if more than
`n_keypoints` are detected. If not, then all the detected keypoints
are returned.
fast_n : int, optional
The `n` parameter in `skimage.feature.corner_fast`. Minimum number of
consecutive pixels out of 16 pixels on the circle that should all be
either brighter or darker w.r.t test-pixel. A point c on the circle is
darker w.r.t test pixel p if ``Ic < Ip - threshold`` and brighter if
``Ic > Ip + threshold``. Also stands for the n in ``FAST-n`` corner
detector.
fast_threshold : float, optional
The ``threshold`` parameter in ``feature.corner_fast``. Threshold used
to decide whether the pixels on the circle are brighter, darker or
similar w.r.t. the test pixel. Decrease the threshold when more
corners are desired and vice-versa.
harris_k : float, optional
The `k` parameter in `skimage.feature.corner_harris`. Sensitivity
factor to separate corners from edges, typically in range ``[0, 0.2]``.
Small values of `k` result in detection of sharp corners.
downscale : float, optional
Downscale factor for the image pyramid. Default value 1.2 is chosen so
that there are more dense scales which enable robust scale invariance
for a subsequent feature description.
n_scales : int, optional
Maximum number of scales from the bottom of the image pyramid to
extract the features from.
Attributes
----------
keypoints : (N, 2) array
Keypoint coordinates as ``(row, col)``.
scales : (N, ) array
Corresponding scales.
orientations : (N, ) array
Corresponding orientations in radians.
responses : (N, ) array
Corresponding Harris corner responses.
descriptors : (Q, `descriptor_size`) array of dtype bool
        2D array of binary descriptors of size `descriptor_size` for the Q
        keypoints remaining after border keypoints are filtered out. The
        value at index ``(i, j)`` is ``True`` or ``False``, representing the
        outcome of the intensity comparison for the i-th keypoint on the
        j-th decision pixel-pair, where ``Q == np.sum(mask)``.
References
----------
.. [1] Ethan Rublee, Vincent Rabaud, Kurt Konolige and Gary Bradski
"ORB: An efficient alternative to SIFT and SURF"
http://www.vision.cs.chubu.ac.jp/CV-R/pdf/Rublee_iccv2011.pdf
Examples
--------
>>> from skimage.feature import ORB, match_descriptors
>>> img1 = np.zeros((100, 100))
>>> img2 = np.zeros_like(img1)
>>> np.random.seed(1)
>>> square = np.random.rand(20, 20)
>>> img1[40:60, 40:60] = square
>>> img2[53:73, 53:73] = square
>>> detector_extractor1 = ORB(n_keypoints=5)
>>> detector_extractor2 = ORB(n_keypoints=5)
>>> detector_extractor1.detect_and_extract(img1)
>>> detector_extractor2.detect_and_extract(img2)
>>> matches = match_descriptors(detector_extractor1.descriptors,
... detector_extractor2.descriptors)
>>> matches
array([[0, 0],
[1, 1],
[2, 2],
[3, 3],
[4, 4]])
>>> detector_extractor1.keypoints[matches[:, 0]]
array([[ 42., 40.],
[ 47., 58.],
[ 44., 40.],
[ 59., 42.],
[ 45., 44.]])
>>> detector_extractor2.keypoints[matches[:, 1]]
array([[ 55., 53.],
[ 60., 71.],
[ 57., 53.],
[ 72., 55.],
[ 58., 57.]])
"""
def __init__(self, downscale=1.2, n_scales=8,
n_keypoints=500, fast_n=9, fast_threshold=0.08,
harris_k=0.04):
self.downscale = downscale
self.n_scales = n_scales
self.n_keypoints = n_keypoints
self.fast_n = fast_n
self.fast_threshold = fast_threshold
self.harris_k = harris_k
self.keypoints = None
self.scales = None
self.responses = None
self.orientations = None
self.descriptors = None
def _build_pyramid(self, image):
image = _prepare_grayscale_input_2D(image)
return list(pyramid_gaussian(image, self.n_scales - 1, self.downscale))
def _detect_octave(self, octave_image):
# Extract keypoints for current octave
fast_response = corner_fast(octave_image, self.fast_n,
self.fast_threshold)
keypoints = corner_peaks(fast_response, min_distance=1)
if len(keypoints) == 0:
return (np.zeros((0, 2), dtype=np.double),
np.zeros((0, ), dtype=np.double),
np.zeros((0, ), dtype=np.double))
mask = _mask_border_keypoints(octave_image.shape, keypoints,
distance=16)
keypoints = keypoints[mask]
orientations = corner_orientations(octave_image, keypoints,
OFAST_MASK)
harris_response = corner_harris(octave_image, method='k',
k=self.harris_k)
responses = harris_response[keypoints[:, 0], keypoints[:, 1]]
return keypoints, orientations, responses
def detect(self, image):
"""Detect oriented FAST keypoints along with the corresponding scale.
Parameters
----------
image : 2D array
Input image.
"""
pyramid = self._build_pyramid(image)
keypoints_list = []
orientations_list = []
scales_list = []
responses_list = []
for octave in range(len(pyramid)):
octave_image = np.ascontiguousarray(pyramid[octave])
keypoints, orientations, responses = \
self._detect_octave(octave_image)
keypoints_list.append(keypoints * self.downscale ** octave)
orientations_list.append(orientations)
scales_list.append(self.downscale ** octave
* np.ones(keypoints.shape[0], dtype=np.intp))
responses_list.append(responses)
keypoints = np.vstack(keypoints_list)
orientations = np.hstack(orientations_list)
scales = np.hstack(scales_list)
responses = np.hstack(responses_list)
if keypoints.shape[0] < self.n_keypoints:
self.keypoints = keypoints
self.scales = scales
self.orientations = orientations
self.responses = responses
else:
# Choose best n_keypoints according to Harris corner response
best_indices = responses.argsort()[::-1][:self.n_keypoints]
self.keypoints = keypoints[best_indices]
self.scales = scales[best_indices]
self.orientations = orientations[best_indices]
self.responses = responses[best_indices]
def _extract_octave(self, octave_image, keypoints, orientations):
mask = _mask_border_keypoints(octave_image.shape, keypoints,
distance=20)
keypoints = np.array(keypoints[mask], dtype=np.intp, order='C',
copy=False)
orientations = np.array(orientations[mask], dtype=np.double, order='C',
copy=False)
descriptors = _orb_loop(octave_image, keypoints, orientations)
return descriptors, mask
def extract(self, image, keypoints, scales, orientations):
"""Extract rBRIEF binary descriptors for given keypoints in image.
Note that the keypoints must be extracted using the same `downscale`
and `n_scales` parameters. Additionally, if you want to extract both
keypoints and descriptors you should use the faster
`detect_and_extract`.
Parameters
----------
image : 2D array
Input image.
keypoints : (N, 2) array
Keypoint coordinates as ``(row, col)``.
scales : (N, ) array
Corresponding scales.
orientations : (N, ) array
Corresponding orientations in radians.
"""
pyramid = self._build_pyramid(image)
descriptors_list = []
mask_list = []
# Determine octaves from scales
octaves = (np.log(scales) / np.log(self.downscale)).astype(np.intp)
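        # e.g. with downscale=1.2, keypoints detected on the third pyramid
        # level carry scale 1.2 ** 2, so log(scale) / log(downscale) recovers
        # octave index 2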
for octave in range(len(pyramid)):
# Mask for all keypoints in current octave
octave_mask = octaves == octave
if np.sum(octave_mask) > 0:
octave_image = np.ascontiguousarray(pyramid[octave])
octave_keypoints = keypoints[octave_mask]
octave_keypoints /= self.downscale ** octave
octave_orientations = orientations[octave_mask]
descriptors, mask = self._extract_octave(octave_image,
octave_keypoints,
octave_orientations)
descriptors_list.append(descriptors)
mask_list.append(mask)
        self.descriptors = np.vstack(descriptors_list).view(bool)
self.mask_ = np.hstack(mask_list)
def detect_and_extract(self, image):
"""Detect oriented FAST keypoints and extract rBRIEF descriptors.
Note that this is faster than first calling `detect` and then
`extract`.
Parameters
----------
image : 2D array
Input image.
"""
pyramid = self._build_pyramid(image)
keypoints_list = []
responses_list = []
scales_list = []
orientations_list = []
descriptors_list = []
for octave in range(len(pyramid)):
octave_image = np.ascontiguousarray(pyramid[octave])
keypoints, orientations, responses = \
self._detect_octave(octave_image)
if len(keypoints) == 0:
keypoints_list.append(keypoints)
responses_list.append(responses)
                descriptors_list.append(np.zeros((0, 256), dtype=bool))
continue
descriptors, mask = self._extract_octave(octave_image, keypoints,
orientations)
keypoints_list.append(keypoints[mask] * self.downscale ** octave)
responses_list.append(responses[mask])
orientations_list.append(orientations[mask])
            scales_list.append(self.downscale ** octave
                               * np.ones(keypoints[mask].shape[0], dtype=np.intp))
descriptors_list.append(descriptors)
keypoints = np.vstack(keypoints_list)
responses = np.hstack(responses_list)
scales = np.hstack(scales_list)
orientations = np.hstack(orientations_list)
        descriptors = np.vstack(descriptors_list).view(bool)
if keypoints.shape[0] < self.n_keypoints:
self.keypoints = keypoints
self.scales = scales
self.orientations = orientations
self.responses = responses
self.descriptors = descriptors
else:
# Choose best n_keypoints according to Harris corner response
best_indices = responses.argsort()[::-1][:self.n_keypoints]
self.keypoints = keypoints[best_indices]
self.scales = scales[best_indices]
self.orientations = orientations[best_indices]
self.responses = responses[best_indices]
self.descriptors = descriptors[best_indices]
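# Minimal usage sketch (assumes `img` is a 2D grayscale ndarray); it complements
# the class docstring example by calling detect() and extract() separately,
# which is useful when descriptors must be recomputed for the same keypoints.
def _example_separate_detect_extract(img):
    orb = ORB(n_keypoints=100)
    orb.detect(img)
    orb.extract(img, orb.keypoints, orb.scales, orb.orientations)
    # orb.mask_ records which keypoints (grouped by octave) yielded descriptors
    return orb.descriptors, orb.mask_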
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the cinder-status CLI interfaces."""
from unittest import mock
import uuid
import ddt
from oslo_config import cfg
from oslo_upgradecheck import upgradecheck as uc
import testtools
import cinder.backup.manager # noqa
from cinder.cmd import status
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as sqla_api
from cinder import exception
from cinder.tests.unit import fake_constants as fakes
from cinder.tests.unit import test
import cinder.volume.manager as volume_manager
CONF = cfg.CONF
@ddt.ddt
class TestCinderStatus(testtools.TestCase):
"""Test cases for the cinder-status upgrade check command."""
def _setup_database(self):
CONF.set_default('connection', 'sqlite://', 'database')
CONF.set_default('sqlite_synchronous', False, 'database')
self.useFixture(test.Database())
sqla_api._GET_METHODS = {}
self.addCleanup(CONF.reset)
def setUp(self):
super(TestCinderStatus, self).setUp()
self.checks = status.Checks()
# Make sure configuration is initialized
try:
CONF([], project='cinder')
except cfg.RequiredOptError:
# Doesn't matter in this situation
pass
# Make sure our expected path is returned
patcher = mock.patch.object(CONF, 'find_file')
self.addCleanup(patcher.stop)
self.find_file = patcher.start()
self.find_file.return_value = '/etc/cinder/'
self._setup_database()
self.context = context.get_admin_context()
def _set_config(self, key, value, group=None):
CONF.set_override(key, value, group=group)
self.addCleanup(CONF.clear_override, key, group=group)
def _set_backup_driver(self, driver_path):
CONF.set_override('backup_driver', driver_path)
self.addCleanup(CONF.clear_override, 'backup_driver')
def _set_volume_driver(self, volume_driver, enabled_backend):
CONF.register_opts(volume_manager.volume_backend_opts,
group=enabled_backend)
CONF.set_override('enabled_backends', enabled_backend)
CONF.set_override('volume_driver', volume_driver,
group=enabled_backend)
self.addCleanup(CONF.clear_override, 'volume_driver',
group=enabled_backend)
self.addCleanup(CONF.clear_override, 'enabled_backends')
def test_check_backup_module(self):
self._set_config(
'backup_driver',
'cinder.backup.drivers.swift.SwiftBackupDriver')
result = self.checks._check_backup_module()
self.assertEqual(uc.Code.SUCCESS, result.code)
def test_check_backup_module_not_class(self):
self._set_config('backup_driver', 'cinder.backup.drivers.swift')
result = self.checks._check_backup_module()
self.assertEqual(uc.Code.FAILURE, result.code)
self.assertIn('requires the full path', result.details)
def test_check_policy_file(self):
with mock.patch.object(self.checks, '_file_exists') as fe:
fe.return_value = False
result = self.checks._check_policy_file()
self.assertEqual(uc.Code.SUCCESS, result.code)
def test_check_policy_file_exists(self):
with mock.patch.object(self.checks, '_file_exists') as fe:
fe.return_value = True
result = self.checks._check_policy_file()
self.assertEqual(uc.Code.WARNING, result.code)
self.assertIn('policy.json file is present', result.details)
def test_check_policy_file_custom_path(self):
policy_path = '/my/awesome/configs/policy.yaml'
self._set_config('policy_file', policy_path, group='oslo_policy')
with mock.patch.object(self.checks, '_file_exists') as fe:
fe.return_value = False
result = self.checks._check_policy_file()
fe.assert_called_with(policy_path)
self.assertEqual(uc.Code.WARNING, result.code)
self.assertIn(policy_path, result.details)
def test_check_policy_file_custom_file(self):
policy_path = 'mypolicy.yaml'
self._set_config('policy_file', policy_path, group='oslo_policy')
with mock.patch.object(self.checks, '_file_exists') as fe:
fe.return_value = False
result = self.checks._check_policy_file()
fe.assert_called_with('/etc/cinder/%s' % policy_path)
self.assertEqual(uc.Code.WARNING, result.code)
self.assertIn(policy_path, result.details)
def test_check_periodic_interval_default(self):
# default value is 60
self._set_config('periodic_interval', 60)
result = self.checks._check_periodic_interval()
self.assertEqual(uc.Code.SUCCESS, result.code)
def test_check_periodic_interval_not_default(self):
# default value is 60
self._set_config('periodic_interval', 22)
result = self.checks._check_periodic_interval()
self.assertEqual(uc.Code.WARNING, result.code)
self.assertIn('New configuration options have been introduced',
result.details)
@ddt.data(['cinder.quota.DbQuotaDriver', True],
['cinder.quota.NestedDbQuotaDriver', False])
@ddt.unpack
def test_nested_quota_driver(self, driver, should_pass):
self._set_config('quota_driver', driver)
result = self.checks._check_nested_quota()
if should_pass:
expected = uc.Code.SUCCESS
else:
expected = uc.Code.FAILURE
self.assertEqual(expected, result.code)
def test_check_legacy_win_conf(self):
self._set_volume_driver(
'cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver',
'winiscsi')
result = self.checks._check_legacy_windows_config()
self.assertEqual(uc.Code.SUCCESS, result.code)
def test_check_legacy_win_conf_fail(self):
self._set_volume_driver(
'cinder.volume.drivers.windows.windows.WindowsDriver',
'winiscsi')
result = self.checks._check_legacy_windows_config()
self.assertEqual(uc.Code.FAILURE, result.code)
self.assertIn('Please update to use', result.details)
def test_check_legacy_win_conf_no_drivers(self):
self._set_config('enabled_backends', None)
result = self.checks._check_legacy_windows_config()
self.assertEqual(uc.Code.SUCCESS, result.code)
def test_check_removed_drivers(self):
self._set_volume_driver(
'cinder.volume.drivers.lvm.LVMVolumeDriver',
'winiscsi')
result = self.checks._check_removed_drivers()
self.assertEqual(uc.Code.SUCCESS, result.code)
@ddt.data('cinder.volume.drivers.coprhd.fc.EMCCoprHDFCDriver',
'cinder.volume.drivers.coprhd.iscsi.EMCCoprHDISCSIDriver',
'cinder.volume.drivers.coprhd.scaleio.EMCCoprHDScaleIODriver',
'cinder.volume.drivers.disco.disco.DiscoDriver',
'cinder.volume.drivers.hgst.HGSTDriver',
'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.'
'HPELeftHandISCSIDriver',
'cinder.volume.drivers.sheepdog.SheepdogDriver',
'cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver',
'cinder.volume.drivers.zfssa.zfssanfs.ZFSSANFSDriver')
def test_check_removed_drivers_fail(self, volume_driver):
self._set_volume_driver(
volume_driver,
'testDriver')
result = self.checks._check_removed_drivers()
self.assertEqual(uc.Code.FAILURE, result.code)
self.assertIn(volume_driver, result.details)
# Check for singular version of result message
self.assertIn('This driver has been removed', result.details)
def test_check_multiple_removed_drivers_fail(self):
d1 = 'cinder.volume.drivers.coprhd.fc.EMCCoprHDFCDriver'
d3 = 'cinder.volume.drivers.coprhd.scaleio.EMCCoprHDScaleIODriver'
d5 = 'cinder.volume.drivers.hgst.HGSTDriver'
d2 = 'cinder.volume.drivers.foo.iscsi.FooDriver'
d4 = 'cinder.volume.drivers.bar.fc.BarFCDriver'
self._set_volume_driver(d1, 'b1')
self._set_volume_driver(d2, 'b2')
self._set_volume_driver(d3, 'b3')
self._set_volume_driver(d4, 'b4')
self._set_volume_driver(d5, 'b5')
CONF.set_override('enabled_backends', 'b1,b2,b3,b4,b5')
result = self.checks._check_removed_drivers()
self.assertEqual(uc.Code.FAILURE, result.code)
self.assertIn(d1, result.details)
self.assertIn(d3, result.details)
self.assertIn(d5, result.details)
self.assertNotIn(d2, result.details)
self.assertNotIn(d4, result.details)
# check for plural version of result message
self.assertIn('The following drivers', result.details)
def test_check_removed_drivers_no_drivers(self):
self._set_config('enabled_backends', None)
result = self.checks._check_removed_drivers()
self.assertEqual(uc.Code.SUCCESS, result.code)
@staticmethod
def uuid():
return str(uuid.uuid4())
def _create_service(self, **values):
values.setdefault('uuid', self.uuid())
db.service_create(self.context, values)
def _create_volume(self, **values):
values.setdefault('id', self.uuid())
values.setdefault('service_uuid', self.uuid())
try:
db.volume_create(self.context, values)
# Support setting deleted on creation
except exception.VolumeNotFound:
if values.get('deleted') is not True:
raise
def test__check_service_uuid_ok(self):
self._create_service()
self._create_service()
self._create_volume(volume_type_id=fakes.VOLUME_TYPE_ID)
# Confirm that we ignored deleted entries
self._create_volume(service_uuid=None, deleted=True,
volume_type_id=fakes.VOLUME_TYPE_ID)
result = self.checks._check_service_uuid()
self.assertEqual(uc.Code.SUCCESS, result.code)
def test__check_service_uuid_fail_service(self):
self._create_service()
self._create_service(uuid=None)
self._create_volume(volume_type_id=fakes.VOLUME_TYPE_ID)
result = self.checks._check_service_uuid()
self.assertEqual(uc.Code.FAILURE, result.code)
def test__check_service_uuid_fail_volume(self):
self._create_service()
self._create_volume(service_uuid=None,
volume_type_id=fakes.VOLUME_TYPE_ID)
result = self.checks._check_service_uuid()
self.assertEqual(uc.Code.FAILURE, result.code)
def test__check_attachment_specs_ok(self):
attach_uuid = self.uuid()
# Confirm that we ignore deleted attachment specs
db.attachment_specs_update_or_create(self.context, attach_uuid,
{'k': 'v'})
db.attachment_specs_delete(self.context, attach_uuid, 'k')
result = self.checks._check_attachment_specs()
self.assertEqual(uc.Code.SUCCESS, result.code)
def test__check_attachment_specs_fail(self):
db.attachment_specs_update_or_create(self.context, self.uuid(),
{'k': 'v', 'k2': 'v2'})
result = self.checks._check_attachment_specs()
self.assertEqual(uc.Code.FAILURE, result.code)
|
|
import numpy as np
import h5py
import ctypes as ct
from os import path
from numpy.ctypeslib import ndpointer
from scipy.fftpack import dct, idct
from sys import float_info
from collections import Counter
class CONSTANTS(ct.Structure):
_fields_ = [
('N', ct.c_int),
('P', ct.c_int),
('M', ct.c_int),
('K', ct.c_int),
('order', ct.c_int)]
class DATA(ct.Structure):
_fields_ = [
('X', ct.POINTER(ct.c_double)),
('S', ct.POINTER(ct.c_int)),
('U', ct.POINTER(ct.c_double)),
('C', ct.POINTER(ct.c_int)),
('I', ct.POINTER(ct.c_int))]
class SparseKMeans():
def __init__(self, n_clusters = 8, init = 'k-means++', n_init = 10, n_init_ROS = 1,
max_iter = 300, random_state = None, gamma = 0.05,
n_passes = 1, order = 2, verbose = False, use_ROS = True, fROS = None,
write_permission = False, compute_ROS = True,
full_init = True, init_resparsify = False, true_labels = None):
# assign constants
self.use_ROS = use_ROS
self.true_labels = true_labels
self.init_resparsify = init_resparsify
self.compute_ROS = compute_ROS
self.n_init_ROS = n_init_ROS
self.verbose = verbose
self.K = n_clusters
self.init = init
self.n_init = n_init
self.max_iter = max_iter
self.gamma = gamma
self.n_passes = n_passes
self.order = order
self.fROS = fROS
self.write_permission = write_permission
self.full_init = full_init
# prepare the imported C functions
cfn = ct.CDLL(path.abspath('kmeans.so'), mode = ct.RTLD_GLOBAL)
cfn.seedrand.restype = None
cfn.seedrand.argtypes = [ct.c_int]
cfn.initialize_centroids_kmpp.restype = None
cfn.initialize_centroids_kmpp.argtypes = [ct.POINTER(CONSTANTS),
ct.POINTER(DATA)]
cfn.update_centroids.restype = None
cfn.update_centroids.argtypes = [ct.POINTER(CONSTANTS),
ct.POINTER(DATA)]
cfn.update_assignments.restype = None
cfn.update_assignments.argtypes = [ct.POINTER(CONSTANTS),
ct.POINTER(DATA)]
cfn.update_centroids_2pass.restype = None
cfn.update_centroids_2pass.argtypes = [ct.POINTER(CONSTANTS),
ct.POINTER(DATA),
ndpointer(ct.c_double, flags="C_CONTIGUOUS")]
cfn.evaluate_objective.restype = ct.c_double
cfn.evaluate_objective.argtypes = [ct.POINTER(CONSTANTS),
ct.POINTER(DATA)]
cfn.overwrite_double.restype = None
cfn.overwrite_double.argtypes = [ndpointer(ct.c_double, flags = "C_CONTIGUOUS"),
ndpointer(ct.c_double, flags = "C_CONTIGUOUS"),
ct.c_int]
cfn.overwrite_int.restype = None
cfn.overwrite_int.argtypes = [ndpointer(ct.c_int, flags = "C_CONTIGUOUS"),
ndpointer(ct.c_int, flags = "C_CONTIGUOUS"),
ct.c_int]
self.cfn = cfn
        # if there's a random seed given, set it in the C executable and numpy
        if random_state is not None:
self.seedrand(random_state)
# preprocessing routines
def assign_data(self, data):
""" Assigns the data as an attribute. data can either be a numpy ndarray
or an h5py Dataset object. Sets the following attributes:
self.X array or h5py Dataset, NxP
self.X_type string, 'array' or 'h5py'
self.N # of rows of X (number of datapoints)
self.P # of cols of X (latent dimension)
self.gamma compression factor (recomputed)
self.M # reduced latent dimension
"""
if type(data) is h5py._hl.dataset.Dataset:
self.X_type = 'hdf5'
elif type(data) is np.ndarray:
self.X_type = 'array'
else:
raise Exception('Data must either be an hdf5 dataset or a numpy array.')
self.N, self.P = data.shape
self.X = data
if self.verbose:
print('Data assigned as {}, '.format(self.X_type) +
'X is {} by {} (data points by latent dimension).'.format(self.N, self.P))
# compute compression factor
if type(self.gamma) is float:
self.M = int(np.floor(self.P * self.gamma))
if type(self.gamma) is int:
self.M = self.gamma
# overwrite gamma (either because it was an int or to account for
# rounding from floor function above)
self.gamma = self.M/self.P
if self.verbose:
print('Latent dimension will be reduced from {} to'.format(self.P),
'{} for a compression factor of {}'.format(self.M, self.gamma))
def initialize_c_structs(self):
""" Set up the arrays we need to pass to C through ctypes."""
self.CONST = CONSTANTS(self.N,self.P,self.M,self.K,self.order)
# initialize centroids to 0 as placeholders
self.U = np.ascontiguousarray(np.zeros(self.K * self.P))
self.C = np.ascontiguousarray(np.zeros(self.N, dtype = 'int32'))
self.S = np.ascontiguousarray(np.zeros(self.N * self.M), dtype = 'int32')
self.HDX_sub = np.ascontiguousarray(np.zeros(self.N * self.M))
self.I = np.ascontiguousarray(np.zeros(self.K, dtype = 'int32'))
# instantiate the struct of pointers
self.D = DATA(self.HDX_sub.ctypes.data_as(ct.POINTER(ct.c_double)),
self.S.ctypes.data_as(ct.POINTER(ct.c_int)),
self.U.ctypes.data_as(ct.POINTER(ct.c_double)),
self.C.ctypes.data_as(ct.POINTER(ct.c_int)),
self.I.ctypes.data_as(ct.POINTER(ct.c_int)))
# ROS functions
def set_ROS(self):
""" Assigns the ROS and indices."""
if self.use_ROS:
# if we're told to compute it, do so
if self.compute_ROS == True:
HDX, D_indices = self.ROS(self.X[:])
# ... but write it if we're allowed to so
if self.write_permission == True:
self.write_ROS(self.fROS, HDX, D_indices)
# otherwise load it
elif self.fROS != None:
HDX, D_indices = self.read_ROS(self.fROS)
else:
# if we're not using the ROS just set HDX to be X
HDX = self.X[:].astype(float)
D_indices = []
self.HDX, self.D_indices = HDX, D_indices
def ROS(self, X_dense):
""" Apply the random orthogonal system transform to the full dataset, i.e.,
        compute HDX. D is diagonal with random ±1 entries, so we just flip the
        signs of some columns; for now H is a discrete cosine transform."""
if self.verbose:
print('Computing ROS.')
D_indices = [i for i in range(self.P) if np.random.choice([0,1])]
X_dense[:,D_indices] *= -1
X_dense = dct(X_dense, norm = 'ortho', axis = 1, overwrite_x = False)
return [X_dense, D_indices]
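    # Sketch of the inverse (this is what postprocess_1pass does to the fitted
    # centroids): apply idct(..., norm='ortho', axis=1) and then flip the same
    # D_indices columns; since H and D are orthogonal this recovers the
    # original coordinates.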
def read_ROS(self, fROS):
HDX = fROS['HDX']
D_indices = fROS['D_indices']
return [HDX, D_indices]
def write_ROS(self, fROS, HDX, D_indices):
""" Writes ROS and D_indices to file fROS"""
if self.write_permission == False:
            raise Exception('Trying to write the ROS transform to disk but ' +
                            'write_permission is False')
if 'HDX' in fROS:
if self.verbose:
print('Deleting existing ROS dataset in hdf5 file {}'.format(fROS))
del fROS['HDX']
if 'D_indices' in self.fROS:
if self.verbose:
print('Deleting existing D_indices dataset in hdf5 file {}'.format(fROS))
del fROS['D_indices']
if self.verbose:
print('Writing ROS and D_indices to hdf5 file {}'.format(fROS))
fROS.create_dataset('HDX', data = HDX, dtype = 'd')
#fROS.create_dataset('D_indices', data = D_indices, dtype = 'int4')
fROS.create_dataset('D_indices', data = D_indices, dtype = 'int')
def subsample(self, M):
col_inds = np.array([np.sort(np.random.choice(self.P,M, replace = False))
for i in range(self.N)], dtype = 'int32').flatten()
row_inds = [i for i in range(self.N) for j in range(M)]
HDX_sub = np.take(self.HDX[:].flatten(), [self.P * r + c for (r,c)
in zip(row_inds,col_inds)])
HDX_sub = HDX_sub.flatten()
return [HDX_sub, col_inds]
def set_HDX_sub(self):
HDX_sub, S = self.subsample(self.M)
self.cfn.overwrite_double(HDX_sub, self.HDX_sub, len(HDX_sub))
self.cfn.overwrite_int(S, self.S, len(HDX_sub))
def fit(self, data):
""" Wrapper to iterate over n_init x n_
"""
self.assign_data(data)
self.initialize_c_structs()
self.set_ROS()
if self.verbose:
print("N = {}".format(self.N))
best_objective = float_info.max
U_best = np.zeros(self.K * self.P)
results = []
for nros in range(self.n_init_ROS):
self.set_HDX_sub()
if self.verbose:
print('Subsampling HDX, run {} of {}'.format(nros+1, self.n_init_ROS))
for n in range(self.n_init):
if self.verbose:
print('Fitting trial {} of {}'.format(n+1, self.n_init),)
converged, ctr = self.fit_single_trial()
current_objective = self.evaluate_objective()
if self.verbose:
print(' objective value = {}'.format(current_objective))
# keep this trial if it's better
if current_objective < best_objective:
U_best = self.U
best_objective = current_objective
# save the objective value for statistics
results.append({'converged' : converged, 'objective' : current_objective,
'n_init' : n, 'n_ROS' : nros, 'iter' : ctr , 'init_ind' : np.copy(self.I)})
if self.true_labels is not None:
mislabeled = self.labeling_error(self.true_labels)
results[-1]['mislabeled'] = mislabeled
# overwrite self.U with the best one
self.U = np.ascontiguousarray(U_best)
self.inertia_ = best_objective
self.results = results
self.postprocess()
def fit_single_trial(self):
""" Fit the model using a single initial guess. Initializes
        the centroids and iterates through the kmeans algorithm until
convergence (assignments stop changing) or maximum iterations have been
reached."""
## below should now be done in C
# zero out the existing data
# self.cfn.overwrite_double(np.zeros(len(self.U)), self.U, len(self.U))
# self.cfn.overwrite_int(np.zeros(len(self.C), dtype='int32'), self.C, len(self.C))
# initialize the centroids
self.initialize_centroids()
# update the assignments
self.update_assignments()
# iterate through the kmeans algorithm until assignments stop changing
# or until we've hit the maximum number of iterations.
converged = False
ctr = 0
while (converged == False and ctr < self.max_iter):
self.update_centroids()
C_old = np.copy(self.C)
self.update_assignments()
if np.all(C_old==self.C):
converged = True
ctr += 1
return [converged, ctr]
# core algorithm routines
def seedrand(self, number):
""" Seed the random number generators in the C executable and numpy.
Optional; primarily for debugging. """
# set the seed for the C functions
self.cfn.seedrand(number)
# set the seed for numpy
np.random.seed(number)
def initialize_centroids(self):
""" Initialize the centroid guesses. """
if self.verbose:
print("initializing centroids")
if type(self.init) is np.ndarray:
self.cfn.overwrite_int(np.sort(self.init), self.I, len(self.I))
U = self.HDX[list(self.I)].flatten()
elif self.init == 'k-means++':
if self.init_resparsify:
"""Maybe redesign this later - a bit ugly. We want to use a different
                sparsification for the initialization, but to pass this to C we need it set
as an attribute. To do so we compute the new one, save the old one,
overwrite the attribute with the new one, call the C code, and then
overwrite the attribute again with the original. """
col_inds = np.sort(np.random.choice(range(self.P), self.M, replace = False))
HDX_sub_init = self.HDX[:,col_inds].flatten()
HDX_sub_original = np.copy(self.HDX_sub)
if self.verbose:
print("Overwriting HDX_sub with new initialization")
self.cfn.overwrite_double(HDX_sub_init, self.HDX_sub, len(self.HDX_sub))
if self.verbose:
print("Calling C kmpp init after overwrite")
self.cfn.initialize_centroids_kmpp(self.CONST, self.D)
U = self.U
if self.verbose:
print("Overwriting HDX_sub with the original. ")
self.cfn.overwrite_double(HDX_sub_original, self.HDX_sub, len(self.HDX_sub))
else:
if self.verbose:
print("Calling C kmpp init (no overwrite)")
self.cfn.initialize_centroids_kmpp(self.CONST, self.D)
if self.verbose:
print("I've returned C's kmpp")
U = self.U
if self.full_init:
# then overwrite the masked initial means with the corresponding
# full entries from HDX
U = np.array([self.HDX[k] for k in self.I]).flatten()
elif self.init == 'random':
indices = list(np.random.choice(self.N, self.K, replace = False))
indices.sort()
self.cfn.overwrite_int(np.array(indices, dtype = 'int32'), self.I, len(self.I))
U = np.array([self.HDX[k] for k in indices]).flatten()
# now overwrite U with whatever we got from the preceding
self.cfn.overwrite_double(U, self.U, len(self.U))
#mv = memoryview(self.U)
#new_centroids = self.X[:].flatten()
#for i in range(mv.shape[0]):
# mv[i] = new_centroids[i]
def update_assignments(self):
""" C wrapper, update the datapoint assignments based on new centroid
means. """
#if self.verbose:
# print("Updating assignments.")
self.cfn.update_assignments(self.CONST, self.D)
def update_centroids(self):
""" C wrapper, Update the centroids based on new data assignments. """
#if self.verbose:
# print("Updating centroids.")
self.cfn.update_centroids(self.CONST, self.D)
def evaluate_objective(self):
""" Evaluate the masked objective function for the given fit self.U"""
d = self.cfn.evaluate_objective(self.CONST, self.D)
return d
# post-processing routines
def postprocess(self):
""" Wrapper for 1pass and 2pass post-processing routines. Undoes the
transformation to the means. """
if self.n_passes == 1 and self.use_ROS:
self.postprocess_1pass()
elif self.n_passes == 2 and self.use_ROS:
self.postprocess_2pass()
else: raise Exception("n_passes must be 1 or 2; it's {}".format(self.n_passes))
# reformat and rename centroids for compatibility with sklearn
self.cluster_centers_ = self.U.reshape(self.K, self.P)
# rename centroid labels for compatibility with sklearn
self.labels_ = self.C
def postprocess_1pass(self):
""" Undo the preprocessing transforms to U. Equivalent to applying
D^-1 H^-1 to U."""
# reshape the array
self.U = self.U.reshape(self.K,self.P)
# apply H^-1
self.U = idct(self.U, axis=1, norm='ortho', overwrite_x = True)
# apply D^-1
self.U[:,self.D_indices] *= -1
self.U = self.U.flatten()
def postprocess_2pass(self):
""" Recompute the centroids U by taking a second pass through the dense
data-set. Alternative to postprocess_1pass, more accurate but requires
a second pass through the full dataset. """
# CAVEAT: Once load_data is fixed to permit block-loading, we'll have
# to change this function too.
X_dense = np.ascontiguousarray(self.X[:], dtype = float)
self.cfn.update_centroids_2pass(self.CONST, self.D, X_dense)
def labeling_error(self, true_labels):
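        """ Return the fraction of points whose true label differs from the
        majority true label of their assigned cluster (a simple clustering
        error measure). """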
errors = [0 for k in range(self.K)]
for k in range(self.K):
cluster_members = np.where(self.C == k)[0]
counts = list(Counter(true_labels[cluster_members]).values())
if counts:
errors[k] = sum(counts) - max(counts)
else:
errors[k] = 0
fractional_error = sum(errors)/self.N
return fractional_error
|
|
# Copyright 2008 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Parser for XML results returned by NCBI's Entrez Utilities. This
parser is used by the read() function in Bio.Entrez, and is not intended
to be used directly.
"""
# The question is how to represent an XML file as Python objects. Some
# XML files returned by NCBI look like lists, others look like dictionaries,
# and others look like a mix of lists and dictionaries.
#
# My approach is to classify each possible element in the XML as a plain
# string, an integer, a list, a dictionary, or a structure. The latter is a
# dictionary where the same key can occur multiple times; in Python, it is
# represented as a dictionary where that key occurs once, pointing to a list
# of values found in the XML file.
#
# The parser then goes through the XML and creates the appropriate Python
# object for each element. The different levels encountered in the XML are
# preserved on the Python side. So a subelement of a subelement of an element
# is a value in a dictionary that is stored in a list which is a value in
# some other dictionary (or a value in a list which itself belongs to a list
# which is a value in a dictionary, and so on). Attributes encountered in
# the XML are stored as a dictionary in a member .attributes of each element,
# and the tag name is saved in a member .tag.
#
# To decide which kind of Python object corresponds to each element in the
# XML, the parser analyzes the DTD referred at the top of (almost) every
# XML file returned by the Entrez Utilities. This is preferred over a hand-
# written solution, since the number of DTDs is rather large and their
# contents may change over time. About half the code in this parser deals
# with parsing the DTD, and the other half with the XML itself.
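# For exposition only: the nesting described above means a parsed record can
# end up shaped like the plain-Python object returned by this helper. The
# element names here are hypothetical and chosen purely for illustration.
def _example_record_shape():
    return {
        "DocSum": [                                        # repeated element -> list
            {"Id": "12345", "Title": "first summary"},     # subelements -> dictionary
            {"Id": "67890", "Title": "second summary"},
        ]
    }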
import os.path
import urlparse
import urllib
import warnings
from xml.parsers import expat
# The following four classes are used to add a member .attributes to integers,
# strings, lists, and dictionaries, respectively.
class IntegerElement(int):
def __repr__(self):
text = int.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "IntegerElement(%s, attributes=%s)" % (text, repr(attributes))
class StringElement(str):
def __repr__(self):
text = str.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "StringElement(%s, attributes=%s)" % (text, repr(attributes))
class UnicodeElement(unicode):
def __repr__(self):
text = unicode.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "UnicodeElement(%s, attributes=%s)" % (text, repr(attributes))
class ListElement(list):
def __repr__(self):
text = list.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "ListElement(%s, attributes=%s)" % (text, repr(attributes))
class DictionaryElement(dict):
def __repr__(self):
text = dict.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "DictElement(%s, attributes=%s)" % (text, repr(attributes))
# A StructureElement is like a dictionary, but some of its keys can have
# multiple values associated with it. These values are stored in a list
# under each key.
class StructureElement(dict):
def __init__(self, keys):
dict.__init__(self)
for key in keys:
dict.__setitem__(self, key, [])
self.listkeys = keys
def __setitem__(self, key, value):
if key in self.listkeys:
self[key].append(value)
else:
dict.__setitem__(self, key, value)
def __repr__(self):
text = dict.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "DictElement(%s, attributes=%s)" % (text, repr(attributes))
class NotXMLError(ValueError):
def __init__(self, message):
self.msg = message
def __str__(self):
return "Failed to parse the XML data (%s). Please make sure that the input data are in XML format." % self.msg
class CorruptedXMLError(ValueError):
def __init__(self, message):
self.msg = message
def __str__(self):
return "Failed to parse the XML data (%s). Please make sure that the input data are not corrupted." % self.msg
class ValidationError(ValueError):
"""Validating parsers raise this error if the parser finds a tag in the XML that is not defined in the DTD. Non-validating parsers do not raise this error. The Bio.Entrez.read and Bio.Entrez.parse functions use validating parsers by default (see those functions for more information)"""
def __init__(self, name):
self.name = name
def __str__(self):
return "Failed to find tag '%s' in the DTD. To skip all tags that are not represented in the DTD, please call Bio.Entrez.read or Bio.Entrez.parse with validate=False." % self.name
class DataHandler(object):
home = os.path.expanduser('~')
local_dtd_dir = os.path.join(home, '.biopython', 'Bio', 'Entrez', 'DTDs')
del home
from Bio import Entrez
global_dtd_dir = os.path.join(str(Entrez.__path__[0]), "DTDs")
del Entrez
def __init__(self, validate):
self.stack = []
self.errors = []
self.integers = []
self.strings = []
self.lists = []
self.dictionaries = []
self.structures = {}
self.items = []
self.dtd_urls = []
self.validating = validate
self.parser = expat.ParserCreate(namespace_separator=" ")
self.parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
self.parser.XmlDeclHandler = self.xmlDeclHandler
def read(self, handle):
"""Set up the parser and let it parse the XML results"""
try:
self.parser.ParseFile(handle)
except expat.ExpatError, e:
if self.parser.StartElementHandler:
                # We saw the initial <?xml declaration, so we can be sure that
# we are parsing XML data. Most likely, the XML file is
# corrupted.
raise CorruptedXMLError(e)
else:
                # We have not seen the initial <?xml declaration, so probably
# the input data is not in XML format.
raise NotXMLError(e)
try:
return self.object
except AttributeError:
if self.parser.StartElementHandler:
                # We saw the initial <?xml declaration, and expat didn't notice
# any errors, so self.object should be defined. If not, this is
# a bug.
raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at [email protected] for assistance.")
else:
                # We did not see the initial <?xml declaration, so probably
# the input data is not in XML format.
raise NotXMLError("XML declaration not found")
def parse(self, handle):
BLOCK = 1024
while True:
#Read in another block of the file...
text = handle.read(BLOCK)
if not text:
# We have reached the end of the XML file
if self.stack:
# No more XML data, but there is still some unfinished
# business
raise CorruptedXMLError
try:
for record in self.object:
yield record
except AttributeError:
if self.parser.StartElementHandler:
                        # We saw the initial <?xml declaration, and expat
# didn't notice any errors, so self.object should be
# defined. If not, this is a bug.
raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at [email protected] for assistance.")
else:
                        # We did not see the initial <?xml declaration, so
# probably the input data is not in XML format.
raise NotXMLError("XML declaration not found")
self.parser.Parse("", True)
self.parser = None
return
try:
self.parser.Parse(text, False)
except expat.ExpatError, e:
if self.parser.StartElementHandler:
                    # We saw the initial <?xml declaration, so we can be sure
# that we are parsing XML data. Most likely, the XML file
# is corrupted.
raise CorruptedXMLError(e)
else:
                    # We have not seen the initial <?xml declaration, so
# probably the input data is not in XML format.
raise NotXMLError(e)
if not self.stack:
# Haven't read enough from the XML file yet
continue
records = self.stack[0]
if not isinstance(records, list):
raise ValueError("The XML file does not represent a list. Please use Entrez.read instead of Entrez.parse")
while len(records) > 1: # Then the top record is finished
record = records.pop(0)
yield record
def xmlDeclHandler(self, version, encoding, standalone):
# XML declaration found; set the handlers
self.parser.StartElementHandler = self.startElementHandler
self.parser.EndElementHandler = self.endElementHandler
self.parser.CharacterDataHandler = self.characterDataHandler
self.parser.ExternalEntityRefHandler = self.externalEntityRefHandler
self.parser.StartNamespaceDeclHandler = self.startNamespaceDeclHandler
def startNamespaceDeclHandler(self, prefix, un):
raise NotImplementedError("The Bio.Entrez parser cannot handle XML data that make use of XML namespaces")
def startElementHandler(self, name, attrs):
self.content = ""
if name in self.lists:
object = ListElement()
elif name in self.dictionaries:
object = DictionaryElement()
elif name in self.structures:
object = StructureElement(self.structures[name])
elif name in self.items: # Only appears in ESummary
name = str(attrs["Name"]) # convert from Unicode
del attrs["Name"]
itemtype = str(attrs["Type"]) # convert from Unicode
del attrs["Type"]
if itemtype=="Structure":
object = DictionaryElement()
elif name in ("ArticleIds", "History"):
object = StructureElement(["pubmed", "medline"])
elif itemtype=="List":
object = ListElement()
else:
object = StringElement()
object.itemname = name
object.itemtype = itemtype
elif name in self.strings + self.errors + self.integers:
self.attributes = attrs
return
else:
# Element not found in DTD
if self.validating:
raise ValidationError(name)
else:
# this will not be stored in the record
object = ""
if object!="":
object.tag = name
if attrs:
object.attributes = dict(attrs)
if len(self.stack)!=0:
current = self.stack[-1]
try:
current.append(object)
except AttributeError:
current[name] = object
self.stack.append(object)
def endElementHandler(self, name):
value = self.content
if name in self.errors:
if value=="":
return
else:
raise RuntimeError(value)
elif name in self.integers:
value = IntegerElement(value)
elif name in self.strings:
# Convert Unicode strings to plain strings if possible
try:
value = StringElement(value)
except UnicodeEncodeError:
value = UnicodeElement(value)
elif name in self.items:
self.object = self.stack.pop()
if self.object.itemtype in ("List", "Structure"):
return
elif self.object.itemtype=="Integer" and value:
value = IntegerElement(value)
else:
# Convert Unicode strings to plain strings if possible
try:
value = StringElement(value)
except UnicodeEncodeError:
value = UnicodeElement(value)
name = self.object.itemname
else:
self.object = self.stack.pop()
return
value.tag = name
if self.attributes:
value.attributes = dict(self.attributes)
del self.attributes
current = self.stack[-1]
if current!="":
try:
current.append(value)
except AttributeError:
current[name] = value
def characterDataHandler(self, content):
self.content += content
def elementDecl(self, name, model):
"""This callback function is called for each element declaration:
<!ELEMENT name (...)>
encountered in a DTD. The purpose of this function is to determine
whether this element should be regarded as a string, integer, list
dictionary, structure, or error."""
if name.upper()=="ERROR":
self.errors.append(name)
return
if name=='Item' and model==(expat.model.XML_CTYPE_MIXED,
expat.model.XML_CQUANT_REP,
None, ((expat.model.XML_CTYPE_NAME,
expat.model.XML_CQUANT_NONE,
'Item',
()
),
)
):
# Special case. As far as I can tell, this only occurs in the
# eSummary DTD.
self.items.append(name)
return
# First, remove ignorable parentheses around declarations
while (model[0] in (expat.model.XML_CTYPE_SEQ,
expat.model.XML_CTYPE_CHOICE)
and model[1] in (expat.model.XML_CQUANT_NONE,
expat.model.XML_CQUANT_OPT)
and len(model[3])==1):
model = model[3][0]
# PCDATA declarations correspond to strings
if model[0] in (expat.model.XML_CTYPE_MIXED,
expat.model.XML_CTYPE_EMPTY):
self.strings.append(name)
return
# List-type elements
if (model[0] in (expat.model.XML_CTYPE_CHOICE,
expat.model.XML_CTYPE_SEQ) and
model[1] in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP)):
self.lists.append(name)
return
# This is the tricky case. Check which keys can occur multiple
# times. If only one key is possible, and it can occur multiple
# times, then this is a list. If more than one key is possible,
# but none of them can occur multiple times, then this is a
# dictionary. Otherwise, this is a structure.
# In 'single' and 'multiple', we keep track which keys can occur
# only once, and which can occur multiple times.
single = []
multiple = []
# The 'count' function is called recursively to make sure all the
# children in this model are counted. Error keys are ignored;
# they raise an exception in Python.
def count(model):
quantifier, name, children = model[1:]
if name==None:
if quantifier in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP):
for child in children:
multiple.append(child[2])
else:
for child in children:
count(child)
elif name.upper()!="ERROR":
if quantifier in (expat.model.XML_CQUANT_NONE,
expat.model.XML_CQUANT_OPT):
single.append(name)
elif quantifier in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP):
multiple.append(name)
count(model)
if len(single)==0 and len(multiple)==1:
self.lists.append(name)
elif len(multiple)==0:
self.dictionaries.append(name)
else:
self.structures.update({name: multiple})
def open_dtd_file(self, filename):
path = os.path.join(DataHandler.local_dtd_dir, filename)
try:
handle = open(path, "rb")
except IOError:
pass
else:
return handle
path = os.path.join(DataHandler.global_dtd_dir, filename)
try:
handle = open(path, "rb")
except IOError:
pass
else:
return handle
return None
def externalEntityRefHandler(self, context, base, systemId, publicId):
"""The purpose of this function is to load the DTD locally, instead
of downloading it from the URL specified in the XML. Using the local
DTD results in much faster parsing. If the DTD is not found locally,
we try to download it. If new DTDs become available from NCBI,
putting them in Bio/Entrez/DTDs will allow the parser to see them."""
urlinfo = urlparse.urlparse(systemId)
#Following attribute requires Python 2.5+
#if urlinfo.scheme=='http':
if urlinfo[0]=='http':
# Then this is an absolute path to the DTD.
url = systemId
elif urlinfo[0]=='':
# Then this is a relative path to the DTD.
# Look at the parent URL to find the full path.
try:
url = self.dtd_urls[-1]
except IndexError:
# Assume the default URL for DTDs if the top parent
# does not contain an absolute path
source = "http://www.ncbi.nlm.nih.gov/dtd/"
else:
source = os.path.dirname(url)
url = os.path.join(source, systemId)
self.dtd_urls.append(url)
# First, try to load the local version of the DTD file
location, filename = os.path.split(systemId)
handle = self.open_dtd_file(filename)
if not handle:
# DTD is not available as a local file. Try accessing it through
# the internet instead.
message = """\
Unable to load DTD file %s.
Bio.Entrez uses NCBI's DTD files to parse XML files returned by NCBI Entrez.
Though most of NCBI's DTD files are included in the Biopython distribution,
sometimes you may find that a particular DTD file is missing. While we can
access the DTD file through the internet, the parser is much faster if the
required DTD files are available locally.
For this purpose, please download %s from
%s
and save it either in directory
%s
or in directory
%s
in order for Bio.Entrez to find it.
Alternatively, you can save %s in the directory
Bio/Entrez/DTDs in the Biopython distribution, and reinstall Biopython.
Please also inform the Biopython developers about this missing DTD, by
reporting a bug on http://bugzilla.open-bio.org/ or sign up to our mailing
list and emailing us, so that we can include it with the next release of
Biopython.
Proceeding to access the DTD file through the internet...
""" % (filename, filename, url, self.global_dtd_dir, self.local_dtd_dir, filename)
warnings.warn(message)
try:
handle = urllib.urlopen(url)
except IOError:
                raise RuntimeError("Failed to access %s at %s" % (filename, url))
parser = self.parser.ExternalEntityParserCreate(context)
parser.ElementDeclHandler = self.elementDecl
parser.ParseFile(handle)
handle.close()
self.dtd_urls.pop()
return 1
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import abc
import six
__all__ = (
'IObjectSerializer',
'ISerializer',
'ITransport',
'ITransportHandler',
'ISession',
'IApplicationSession',
)
@six.add_metaclass(abc.ABCMeta)
class IObjectSerializer(object):
"""
Raw Python object serialization and deserialization. Object serializers are
used by classes implementing WAMP serializers, that is instances of
:class:`autobahn.wamp.interfaces.ISerializer`.
"""
@abc.abstractproperty
def BINARY(self):
"""
Flag (read-only) to indicate if serializer requires a binary clean
transport or if UTF8 transparency is sufficient.
"""
@abc.abstractmethod
def serialize(self, obj):
"""
Serialize an object to a byte string.
:param obj: Object to serialize.
:type obj: Any serializable type.
:returns: bytes -- Serialized byte string.
"""
@abc.abstractmethod
def unserialize(self, payload):
"""
Unserialize objects from a byte string.
:param payload: Objects to unserialize.
:type payload: bytes
:returns: list -- List of (raw) objects unserialized.
"""
@six.add_metaclass(abc.ABCMeta)
class ISerializer(object):
"""
WAMP message serialization and deserialization.
"""
@abc.abstractproperty
def MESSAGE_TYPE_MAP(self):
"""
Mapping of WAMP message type codes to WAMP message classes.
"""
@abc.abstractproperty
def SERIALIZER_ID(self):
"""
The WAMP serialization format ID.
"""
@abc.abstractmethod
def serialize(self, message):
"""
Serializes a WAMP message to bytes for sending over a transport.
:param message: An instance that implements :class:`autobahn.wamp.interfaces.IMessage`
:type message: obj
:returns: tuple -- A pair ``(payload, is_binary)``.
"""
@abc.abstractmethod
def unserialize(self, payload, is_binary):
"""
Deserialize bytes from a transport and parse into WAMP messages.
:param payload: Byte string from wire.
:type payload: bytes
:param is_binary: Type of payload. True if payload is a binary string, else
the payload is UTF-8 encoded Unicode text.
:type is_binary: bool
:returns: list -- List of ``a.w.m.Message`` objects.
"""
@six.add_metaclass(abc.ABCMeta)
class ITransport(object):
"""
A WAMP transport is a bidirectional, full-duplex, reliable, ordered,
message-based channel.
"""
@abc.abstractmethod
def send(self, message):
"""
Send a WAMP message over the transport to the peer. If the transport is
not open, this raises :class:`autobahn.wamp.exception.TransportLost`.
Returns a deferred/future when the message has been processed and more
messages may be sent. When send() is called while a previous deferred/future
has not yet fired, the send will fail immediately.
:param message: An instance that implements :class:`autobahn.wamp.interfaces.IMessage`
:type message: obj
:returns: obj -- A Deferred/Future
"""
@abc.abstractmethod
def is_open(self):
"""
Check if the transport is open for messaging.
:returns: bool -- ``True``, if the transport is open.
"""
@abc.abstractmethod
def close(self):
"""
Close the transport regularly. The transport will perform any
closing handshake if applicable. This should be used for any
application initiated closing.
"""
@abc.abstractmethod
def abort(self):
"""
Abort the transport abruptly. The transport will be destroyed as
fast as possible, and without playing nice to the peer. This should
only be used in case of fatal errors, protocol violations or possible
detected attacks.
"""
@abc.abstractmethod
def get_channel_id(self):
"""
Return the unique channel ID of the underlying transport. This is used to
mitigate credential forwarding man-in-the-middle attacks when running
application level authentication (eg WAMP-cryptosign) which are decoupled
from the underlying transport.
The channel ID is only available when running over TLS (either WAMP-WebSocket
or WAMP-RawSocket). It is not available for non-TLS transports (plain TCP or
Unix domain sockets). It is also not available for WAMP-over-HTTP/Longpoll.
Further, it is currently unimplemented for asyncio (only works on Twisted).
The channel ID is computed as follows:
- for a client, the SHA256 over the "TLS Finished" message sent by the client
to the server is returned.
- for a server, the SHA256 over the "TLS Finished" message the server expected
the client to send
Note: this is similar to `tls-unique` as described in RFC5929, but instead
of returning the raw "TLS Finished" message, it returns a SHA256 over such a
message. The reason is that we use the channel ID mainly with WAMP-cryptosign,
which is based on Ed25519, where keys are always 32 bytes. And having a channel ID
which is always 32 bytes (independent of the TLS ciphers/hashfuns in use) allows
        us to easily XOR channel IDs with Ed25519 keys and WAMP-cryptosign challenges.
WARNING: For safe use of this (that is, for safely binding app level authentication
to the underlying transport), you MUST use TLS, and you SHOULD deactivate both
TLS session renegotiation and TLS session resumption.
References:
- https://tools.ietf.org/html/rfc5056
- https://tools.ietf.org/html/rfc5929
- http://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Connection.get_finished
- http://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Connection.get_peer_finished
:returns: The channel ID (if available) of the underlying WAMP transport. The
channel ID is a 32 bytes value.
:rtype: binary or None
"""
@six.add_metaclass(abc.ABCMeta)
class ITransportHandler(object):
@abc.abstractproperty
def transport(self):
"""
When the transport this handler is attached to is currently open, this property
can be read from. The property should be considered read-only. When the transport
is gone, this property is set to None.
"""
@abc.abstractmethod
def on_open(self, transport):
"""
Callback fired when transport is open. May run asynchronously. The transport
is considered running and is_open() would return true, as soon as this callback
has completed successfully.
:param transport: An instance that implements :class:`autobahn.wamp.interfaces.ITransport`
:type transport: obj
"""
@abc.abstractmethod
def on_message(self, message):
"""
Callback fired when a WAMP message was received. May run asynchronously. The callback
should return or fire the returned deferred/future when it's done processing the message.
In particular, an implementation of this callback must not access the message afterwards.
:param message: An instance that implements :class:`autobahn.wamp.interfaces.IMessage`
:type message: obj
"""
@abc.abstractmethod
def on_close(self, was_clean):
"""
Callback fired when the transport has been closed.
:param was_clean: Indicates if the transport has been closed regularly.
:type was_clean: bool
"""
@six.add_metaclass(abc.ABCMeta)
class ISession(object):
"""
Base interface for WAMP sessions.
"""
@abc.abstractmethod
def on_connect(self):
"""
Callback fired when the transport this session will run over has been established.
"""
@abc.abstractmethod
def join(self, realm):
"""
Attach the session to the given realm. A session is open as soon as it is attached to a realm.
"""
@abc.abstractmethod
def on_challenge(self, challenge):
"""
Callback fired when the peer demands authentication.
May return a Deferred/Future.
:param challenge: The authentication challenge.
:type challenge: Instance of :class:`autobahn.wamp.types.Challenge`.
"""
@abc.abstractmethod
def on_join(self, details):
"""
Callback fired when WAMP session has been established.
May return a Deferred/Future.
:param details: Session information.
:type details: Instance of :class:`autobahn.wamp.types.SessionDetails`.
"""
@abc.abstractmethod
def leave(self, reason=None, message=None):
"""
Actively close this WAMP session.
:param reason: An optional URI for the closing reason. If you
want to permanently log out, this should be `wamp.close.logout`
:type reason: str
:param message: An optional (human readable) closing message, intended for
logging purposes.
:type message: str
:return: may return a Future/Deferred that fires when we've disconnected
"""
@abc.abstractmethod
def on_leave(self, details):
"""
        Callback fired when WAMP session has been closed.
:param details: Close information.
:type details: Instance of :class:`autobahn.wamp.types.CloseDetails`.
"""
@abc.abstractmethod
def disconnect(self):
"""
Close the underlying transport.
"""
@abc.abstractmethod
def is_connected(self):
"""
Check if the underlying transport is connected.
"""
@abc.abstractmethod
def is_attached(self):
"""
Check if the session has currently joined a realm.
"""
@abc.abstractmethod
def on_disconnect(self):
"""
Callback fired when underlying transport has been closed.
"""
@six.add_metaclass(abc.ABCMeta)
class IApplicationSession(ISession):
"""
Interface for WAMP client peers implementing the four different
WAMP roles (caller, callee, publisher, subscriber).
"""
@abc.abstractmethod
def define(self, exception, error=None):
"""
Defines an exception for a WAMP error in the context of this WAMP session.
:param exception: The exception class to define an error mapping for.
:type exception: A class that derives of ``Exception``.
:param error: The URI (or URI pattern) the exception class should be mapped for.
Iff the ``exception`` class is decorated, this must be ``None``.
:type error: str
"""
@abc.abstractmethod
def call(self, procedure, *args, **kwargs):
"""
Call a remote procedure.
This will return a Deferred/Future, that when resolved, provides the actual result
returned by the called remote procedure.
- If the result is a single positional return value, it'll be returned "as-is".
- If the result contains multiple positional return values or keyword return values,
the result is wrapped in an instance of :class:`autobahn.wamp.types.CallResult`.
- If the call fails, the returned Deferred/Future will be rejected with an instance
of :class:`autobahn.wamp.exception.ApplicationError`.
If ``kwargs`` contains an ``options`` keyword argument that is an instance of
:class:`autobahn.wamp.types.CallOptions`, this will provide specific options for
the call to perform.
When the *Caller* and *Dealer* implementations support canceling of calls, the call may
be canceled by canceling the returned Deferred/Future.
:param procedure: The URI of the remote procedure to be called, e.g. ``u"com.myapp.hello"``.
:type procedure: unicode
:param args: Any positional arguments for the call.
:type args: list
:param kwargs: Any keyword arguments for the call.
:type kwargs: dict
:returns: A Deferred/Future for the call result -
:rtype: instance of :tx:`twisted.internet.defer.Deferred` / :py:class:`asyncio.Future`
"""
@abc.abstractmethod
def register(self, endpoint, procedure=None, options=None):
"""
Register a procedure for remote calling.
When ``endpoint`` is a callable (function, method or object that implements ``__call__``),
then ``procedure`` must be provided and an instance of
:tx:`twisted.internet.defer.Deferred` (when running on **Twisted**) or an instance
of :py:class:`asyncio.Future` (when running on **asyncio**) is returned.
- If the registration *succeeds* the returned Deferred/Future will *resolve* to
an object that implements :class:`autobahn.wamp.interfaces.IRegistration`.
- If the registration *fails* the returned Deferred/Future will *reject* with an
instance of :class:`autobahn.wamp.exception.ApplicationError`.
When ``endpoint`` is an object, then each of the object's methods that is decorated
with :func:`autobahn.wamp.register` is automatically registered and a (single)
DeferredList or Future is returned that gathers all individual underlying Deferreds/Futures.
:param endpoint: The endpoint called under the procedure.
:type endpoint: callable or object
:param procedure: When ``endpoint`` is a callable, the URI (or URI pattern)
of the procedure to register for. When ``endpoint`` is an object,
the argument is ignored (and should be ``None``).
:type procedure: unicode
:param options: Options for registering.
:type options: instance of :class:`autobahn.wamp.types.RegisterOptions`.
:returns: A registration or a list of registrations (or errors)
:rtype: instance(s) of :tx:`twisted.internet.defer.Deferred` / :py:class:`asyncio.Future`
"""
@abc.abstractmethod
def publish(self, topic, *args, **kwargs):
"""
Publish an event to a topic.
If ``kwargs`` contains an ``options`` keyword argument that is an instance of
:class:`autobahn.wamp.types.PublishOptions`, this will provide
specific options for the publish to perform.
.. note::
By default, publications are non-acknowledged and the publication can
fail silently, e.g. because the session is not authorized to publish
to the topic.
When publication acknowledgement is requested via ``options.acknowledge == True``,
this function returns a Deferred/Future:
- If the publication succeeds the Deferred/Future will resolve to an object
that implements :class:`autobahn.wamp.interfaces.IPublication`.
- If the publication fails the Deferred/Future will reject with an instance
of :class:`autobahn.wamp.exception.ApplicationError`.
:param topic: The URI of the topic to publish to, e.g. ``u"com.myapp.mytopic1"``.
:type topic: unicode
:param args: Arbitrary application payload for the event (positional arguments).
:type args: list
:param kwargs: Arbitrary application payload for the event (keyword arguments).
:type kwargs: dict
:returns: Acknowledgement for acknowledge publications - otherwise nothing.
:rtype: ``None`` or instance of :tx:`twisted.internet.defer.Deferred` / :py:class:`asyncio.Future`
"""
@abc.abstractmethod
def subscribe(self, handler, topic=None, options=None):
"""
Subscribe to a topic for receiving events.
When ``handler`` is a callable (function, method or object that implements ``__call__``),
then `topic` must be provided and an instance of
:tx:`twisted.internet.defer.Deferred` (when running on **Twisted**) or an instance
of :class:`asyncio.Future` (when running on **asyncio**) is returned.
- If the subscription succeeds the Deferred/Future will resolve to an object
that implements :class:`autobahn.wamp.interfaces.ISubscription`.
- If the subscription fails the Deferred/Future will reject with an instance
of :class:`autobahn.wamp.exception.ApplicationError`.
When ``handler`` is an object, then each of the object's methods that is decorated
with :func:`autobahn.wamp.subscribe` is automatically subscribed as event handlers,
and a list of Deferreds/Futures is returned that each resolves or rejects as above.
:param handler: The event handler to receive events.
:type handler: callable or object
:param topic: When ``handler`` is a callable, the URI (or URI pattern)
of the topic to subscribe to. When ``handler`` is an object, this
value is ignored (and should be ``None``).
:type topic: unicode
:param options: Options for subscribing.
:type options: An instance of :class:`autobahn.wamp.types.SubscribeOptions`.
:returns: A single Deferred/Future or a list of such objects
:rtype: instance(s) of :tx:`twisted.internet.defer.Deferred` / :py:class:`asyncio.Future`
"""
|
|
"""
Title: CycleGAN
Author: [A_K_Nain](https://twitter.com/A_K_Nain)
Date created: 2020/08/12
Last modified: 2020/08/12
Description: Implementation of CycleGAN.
"""
"""
## CycleGAN
CycleGAN is a model that aims to solve the image-to-image translation
problem. The goal of the image-to-image translation problem is to learn the
mapping between an input image and an output image using a training set of
aligned image pairs. However, obtaining paired examples isn't always feasible.
CycleGAN tries to learn this mapping without requiring paired input-output images,
using cycle-consistent adversarial networks.
- [Paper](https://arxiv.org/pdf/1703.10593.pdf)
- [Original implementation](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix)
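To make the cycle-consistency idea concrete, the cycle loss from the paper
(reproduced here for reference) penalizes the reconstruction error of both
mappings; the implementation below uses a mean absolute error for it:
$$
\mathcal{L}_{cyc}(G, F) =
\mathbb{E}_{x}\big[\lVert F(G(x)) - x \rVert_1\big] +
\mathbb{E}_{y}\big[\lVert G(F(y)) - y \rVert_1\big]
$$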
"""
"""
## Setup
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
autotune = tf.data.AUTOTUNE
"""
## Prepare the dataset
In this example, we will be using the
[horse to zebra](https://www.tensorflow.org/datasets/catalog/cycle_gan#cycle_ganhorse2zebra)
dataset.
"""
# Load the horse-zebra dataset using tensorflow-datasets.
dataset, _ = tfds.load("cycle_gan/horse2zebra", with_info=True, as_supervised=True)
train_horses, train_zebras = dataset["trainA"], dataset["trainB"]
test_horses, test_zebras = dataset["testA"], dataset["testB"]
# Define the standard image size.
orig_img_size = (286, 286)
# Size of the random crops to be used during training.
input_img_size = (256, 256, 3)
# Weights initializer for the layers.
kernel_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
# Gamma initializer for instance normalization.
gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
buffer_size = 256
batch_size = 1
def normalize_img(img):
img = tf.cast(img, dtype=tf.float32)
# Map values in the range [-1, 1]
return (img / 127.5) - 1.0
def preprocess_train_image(img, label):
# Random flip
img = tf.image.random_flip_left_right(img)
# Resize to the original size first
img = tf.image.resize(img, [*orig_img_size])
# Random crop to 256X256
img = tf.image.random_crop(img, size=[*input_img_size])
# Normalize the pixel values in the range [-1, 1]
img = normalize_img(img)
return img
def preprocess_test_image(img, label):
# Only resizing and normalization for the test images.
img = tf.image.resize(img, [input_img_size[0], input_img_size[1]])
img = normalize_img(img)
return img
"""
## Create `Dataset` objects
"""
# Apply the preprocessing operations to the training data
train_horses = (
train_horses.map(preprocess_train_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
train_zebras = (
train_zebras.map(preprocess_train_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
# Apply the preprocessing operations to the test data
test_horses = (
test_horses.map(preprocess_test_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
test_zebras = (
test_zebras.map(preprocess_test_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
"""
## Visualize some samples
"""
_, ax = plt.subplots(4, 2, figsize=(10, 15))
for i, samples in enumerate(zip(train_horses.take(4), train_zebras.take(4))):
horse = (((samples[0][0] * 127.5) + 127.5).numpy()).astype(np.uint8)
zebra = (((samples[1][0] * 127.5) + 127.5).numpy()).astype(np.uint8)
ax[i, 0].imshow(horse)
ax[i, 1].imshow(zebra)
plt.show()
"""
## Building blocks used in the CycleGAN generators and discriminators
"""
class ReflectionPadding2D(layers.Layer):
"""Implements Reflection Padding as a layer.
Args:
padding(tuple): Amount of padding for the
spatial dimensions.
Returns:
A padded tensor with the same type as the input tensor.
"""
def __init__(self, padding=(1, 1), **kwargs):
self.padding = tuple(padding)
super(ReflectionPadding2D, self).__init__(**kwargs)
def call(self, input_tensor, mask=None):
padding_width, padding_height = self.padding
padding_tensor = [
[0, 0],
[padding_height, padding_height],
[padding_width, padding_width],
[0, 0],
]
return tf.pad(input_tensor, padding_tensor, mode="REFLECT")
def residual_block(
x,
activation,
kernel_initializer=kernel_init,
kernel_size=(3, 3),
strides=(1, 1),
padding="valid",
gamma_initializer=gamma_init,
use_bias=False,
):
dim = x.shape[-1]
input_tensor = x
x = ReflectionPadding2D()(input_tensor)
x = layers.Conv2D(
dim,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
x = activation(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(
dim,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
x = layers.add([input_tensor, x])
return x
def downsample(
x,
filters,
activation,
kernel_initializer=kernel_init,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
gamma_initializer=gamma_init,
use_bias=False,
):
x = layers.Conv2D(
filters,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
if activation:
x = activation(x)
return x
def upsample(
x,
filters,
activation,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
kernel_initializer=kernel_init,
gamma_initializer=gamma_init,
use_bias=False,
):
x = layers.Conv2DTranspose(
filters,
kernel_size,
strides=strides,
padding=padding,
kernel_initializer=kernel_initializer,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
if activation:
x = activation(x)
return x
"""
## Build the generators
The generator consists of downsampling blocks, nine residual blocks,
and upsampling blocks. The structure of the generator is the following:
```
c7s1-64 ==> Conv block with `relu` activation, filter size of 7
d128 ====|
|-> 2 downsampling blocks
d256 ====|
R256 ====|
R256 |
R256 |
R256 |
R256 |-> 9 residual blocks
R256 |
R256 |
R256 |
R256 ====|
u128 ====|
|-> 2 upsampling blocks
u64 ====|
c7s1-3 => Last conv block with `tanh` activation, filter size of 7.
```
"""
def get_resnet_generator(
filters=64,
num_downsampling_blocks=2,
num_residual_blocks=9,
num_upsample_blocks=2,
gamma_initializer=gamma_init,
name=None,
):
img_input = layers.Input(shape=input_img_size, name=name + "_img_input")
x = ReflectionPadding2D(padding=(3, 3))(img_input)
x = layers.Conv2D(filters, (7, 7), kernel_initializer=kernel_init, use_bias=False)(
x
)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
x = layers.Activation("relu")(x)
# Downsampling
for _ in range(num_downsampling_blocks):
filters *= 2
x = downsample(x, filters=filters, activation=layers.Activation("relu"))
# Residual blocks
for _ in range(num_residual_blocks):
x = residual_block(x, activation=layers.Activation("relu"))
# Upsampling
for _ in range(num_upsample_blocks):
filters //= 2
x = upsample(x, filters, activation=layers.Activation("relu"))
# Final block
x = ReflectionPadding2D(padding=(3, 3))(x)
x = layers.Conv2D(3, (7, 7), padding="valid")(x)
x = layers.Activation("tanh")(x)
model = keras.models.Model(img_input, x, name=name)
return model
"""
## Build the discriminators
The discriminators implement the following architecture:
`C64->C128->C256->C512`
"""
def get_discriminator(
filters=64, kernel_initializer=kernel_init, num_downsampling=3, name=None
):
img_input = layers.Input(shape=input_img_size, name=name + "_img_input")
x = layers.Conv2D(
filters,
(4, 4),
strides=(2, 2),
padding="same",
kernel_initializer=kernel_initializer,
)(img_input)
x = layers.LeakyReLU(0.2)(x)
num_filters = filters
for num_downsample_block in range(3):
num_filters *= 2
if num_downsample_block < 2:
x = downsample(
x,
filters=num_filters,
activation=layers.LeakyReLU(0.2),
kernel_size=(4, 4),
strides=(2, 2),
)
else:
x = downsample(
x,
filters=num_filters,
activation=layers.LeakyReLU(0.2),
kernel_size=(4, 4),
strides=(1, 1),
)
x = layers.Conv2D(
1, (4, 4), strides=(1, 1), padding="same", kernel_initializer=kernel_initializer
)(x)
model = keras.models.Model(inputs=img_input, outputs=x, name=name)
return model
# Get the generators
gen_G = get_resnet_generator(name="generator_G")
gen_F = get_resnet_generator(name="generator_F")
# Get the discriminators
disc_X = get_discriminator(name="discriminator_X")
disc_Y = get_discriminator(name="discriminator_Y")
"""
## Build the CycleGAN model
We will override the `train_step()` method of the `Model` class
for training via `fit()`.
"""
class CycleGan(keras.Model):
def __init__(
self,
generator_G,
generator_F,
discriminator_X,
discriminator_Y,
lambda_cycle=10.0,
lambda_identity=0.5,
):
super(CycleGan, self).__init__()
self.gen_G = generator_G
self.gen_F = generator_F
self.disc_X = discriminator_X
self.disc_Y = discriminator_Y
self.lambda_cycle = lambda_cycle
self.lambda_identity = lambda_identity
def compile(
self,
gen_G_optimizer,
gen_F_optimizer,
disc_X_optimizer,
disc_Y_optimizer,
gen_loss_fn,
disc_loss_fn,
):
super(CycleGan, self).compile()
self.gen_G_optimizer = gen_G_optimizer
self.gen_F_optimizer = gen_F_optimizer
self.disc_X_optimizer = disc_X_optimizer
self.disc_Y_optimizer = disc_Y_optimizer
self.generator_loss_fn = gen_loss_fn
self.discriminator_loss_fn = disc_loss_fn
self.cycle_loss_fn = keras.losses.MeanAbsoluteError()
self.identity_loss_fn = keras.losses.MeanAbsoluteError()
def train_step(self, batch_data):
# x is Horse and y is zebra
real_x, real_y = batch_data
# For CycleGAN, we need to calculate different
# kinds of losses for the generators and discriminators.
# We will perform the following steps here:
#
# 1. Pass real images through the generators and get the generated images
# 2. Pass the generated images back to the generators to check if we
        #    can predict the original image from the generated image.
# 3. Do an identity mapping of the real images using the generators.
# 4. Pass the generated images in 1) to the corresponding discriminators.
        # 5. Calculate the generators' total loss (adversarial + cycle + identity)
# 6. Calculate the discriminators loss
# 7. Update the weights of the generators
# 8. Update the weights of the discriminators
# 9. Return the losses in a dictionary
with tf.GradientTape(persistent=True) as tape:
# Horse to fake zebra
fake_y = self.gen_G(real_x, training=True)
# Zebra to fake horse -> y2x
fake_x = self.gen_F(real_y, training=True)
# Cycle (Horse to fake zebra to fake horse): x -> y -> x
cycled_x = self.gen_F(fake_y, training=True)
# Cycle (Zebra to fake horse to fake zebra) y -> x -> y
cycled_y = self.gen_G(fake_x, training=True)
# Identity mapping
same_x = self.gen_F(real_x, training=True)
same_y = self.gen_G(real_y, training=True)
# Discriminator output
disc_real_x = self.disc_X(real_x, training=True)
disc_fake_x = self.disc_X(fake_x, training=True)
disc_real_y = self.disc_Y(real_y, training=True)
disc_fake_y = self.disc_Y(fake_y, training=True)
            # Generator adversarial loss
gen_G_loss = self.generator_loss_fn(disc_fake_y)
gen_F_loss = self.generator_loss_fn(disc_fake_x)
# Generator cycle loss
cycle_loss_G = self.cycle_loss_fn(real_y, cycled_y) * self.lambda_cycle
cycle_loss_F = self.cycle_loss_fn(real_x, cycled_x) * self.lambda_cycle
# Generator identity loss
id_loss_G = (
self.identity_loss_fn(real_y, same_y)
* self.lambda_cycle
* self.lambda_identity
)
id_loss_F = (
self.identity_loss_fn(real_x, same_x)
* self.lambda_cycle
* self.lambda_identity
)
# Total generator loss
total_loss_G = gen_G_loss + cycle_loss_G + id_loss_G
total_loss_F = gen_F_loss + cycle_loss_F + id_loss_F
# Discriminator loss
disc_X_loss = self.discriminator_loss_fn(disc_real_x, disc_fake_x)
disc_Y_loss = self.discriminator_loss_fn(disc_real_y, disc_fake_y)
# Get the gradients for the generators
grads_G = tape.gradient(total_loss_G, self.gen_G.trainable_variables)
grads_F = tape.gradient(total_loss_F, self.gen_F.trainable_variables)
# Get the gradients for the discriminators
disc_X_grads = tape.gradient(disc_X_loss, self.disc_X.trainable_variables)
disc_Y_grads = tape.gradient(disc_Y_loss, self.disc_Y.trainable_variables)
# Update the weights of the generators
self.gen_G_optimizer.apply_gradients(
zip(grads_G, self.gen_G.trainable_variables)
)
self.gen_F_optimizer.apply_gradients(
zip(grads_F, self.gen_F.trainable_variables)
)
# Update the weights of the discriminators
self.disc_X_optimizer.apply_gradients(
zip(disc_X_grads, self.disc_X.trainable_variables)
)
self.disc_Y_optimizer.apply_gradients(
zip(disc_Y_grads, self.disc_Y.trainable_variables)
)
return {
"G_loss": total_loss_G,
"F_loss": total_loss_F,
"D_X_loss": disc_X_loss,
"D_Y_loss": disc_Y_loss,
}
"""
## Create a callback that periodically saves generated images
"""
class GANMonitor(keras.callbacks.Callback):
"""A callback to generate and save images after each epoch"""
def __init__(self, num_img=4):
self.num_img = num_img
def on_epoch_end(self, epoch, logs=None):
_, ax = plt.subplots(4, 2, figsize=(12, 12))
for i, img in enumerate(test_horses.take(self.num_img)):
prediction = self.model.gen_G(img)[0].numpy()
prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)
ax[i, 0].imshow(img)
ax[i, 1].imshow(prediction)
ax[i, 0].set_title("Input image")
ax[i, 1].set_title("Translated image")
ax[i, 0].axis("off")
ax[i, 1].axis("off")
prediction = keras.preprocessing.image.array_to_img(prediction)
prediction.save(
"generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch + 1)
)
plt.show()
plt.close()
"""
## Train the end-to-end model
"""
# Loss function for evaluating adversarial loss
adv_loss_fn = keras.losses.MeanSquaredError()
# Define the loss function for the generators
def generator_loss_fn(fake):
fake_loss = adv_loss_fn(tf.ones_like(fake), fake)
return fake_loss
# Define the loss function for the discriminators
def discriminator_loss_fn(real, fake):
real_loss = adv_loss_fn(tf.ones_like(real), real)
fake_loss = adv_loss_fn(tf.zeros_like(fake), fake)
return (real_loss + fake_loss) * 0.5
# Create cycle gan model
cycle_gan_model = CycleGan(
generator_G=gen_G, generator_F=gen_F, discriminator_X=disc_X, discriminator_Y=disc_Y
)
# Compile the model
cycle_gan_model.compile(
gen_G_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
gen_F_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
disc_X_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
disc_Y_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
gen_loss_fn=generator_loss_fn,
disc_loss_fn=discriminator_loss_fn,
)
# Callbacks
plotter = GANMonitor()
checkpoint_filepath = "./model_checkpoints/cyclegan_checkpoints.{epoch:03d}"
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath
)
# Here we will train the model for just one epoch as each epoch takes around
# 7 minutes on a single P100 backed machine.
cycle_gan_model.fit(
tf.data.Dataset.zip((train_horses, train_zebras)),
epochs=1,
callbacks=[plotter, model_checkpoint_callback],
)
"""
Test the performance of the model.
"""
# This model was trained for 90 epochs. We will be loading those weights
# here. Once the weights are loaded, we will take a few samples from the test
# data and check the model's performance.
"""shell
curl -LO https://github.com/AakashKumarNain/CycleGAN_TF2/releases/download/v1.0/saved_checkpoints.zip
unzip -qq saved_checkpoints.zip
"""
# Load the checkpoints
weight_file = "./saved_checkpoints/cyclegan_checkpoints.090"
cycle_gan_model.load_weights(weight_file).expect_partial()
print("Weights loaded successfully")
_, ax = plt.subplots(4, 2, figsize=(10, 15))
for i, img in enumerate(test_horses.take(4)):
prediction = cycle_gan_model.gen_G(img, training=False)[0].numpy()
prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)
ax[i, 0].imshow(img)
ax[i, 1].imshow(prediction)
ax[i, 0].set_title("Input image")
ax[i, 0].set_title("Input image")
ax[i, 1].set_title("Translated image")
ax[i, 0].axis("off")
ax[i, 1].axis("off")
prediction = keras.preprocessing.image.array_to_img(prediction)
prediction.save("predicted_img_{i}.png".format(i=i))
plt.tight_layout()
plt.show()
|
|
import os
import numpy as np
from time import strftime  # for reduction time/date only
from astropy.io import fits as pyfits
from astropy.io import ascii
import pickle
from astropy.time import Time
from datetime import datetime
#to run this stuff
#reducebysubfolder(os.getcwd())
#should get it going
debug=False
figures=False
def loadfits(Currentfile):
hdulist = pyfits.open(Currentfile, ignore_missing_end= True)#,memmap=False
return hdulist
def save_reduced_data(input_file,sp,unc,wave=None):
print 'Saving extraction as %s'%(os.path.splitext(input_file)[0]+'_reduced.fits')
#load up file to grab header to append reduction information to
hdulist=loadfits(input_file)
head=hdulist[0].header
head['ORDERS'] = (sp.shape[1], 'Number of orders reduced')
#append header field 'how many orders',len(ord)
head['REDUTIME'] = (strftime("%c"), 'When reduced')
#append header field 'when reduced', time.strftime("%c")
head['comment'] = 'data saved as (ext,unc,wave) for each order'
    if wave is None:
wave=np.arange(sp.shape[0])
data=[[]]*(sp.shape[1]+1)
for i in np.arange(sp.shape[1]):
data=np.vstack((sp[:,i],unc[:,i],wave[i,:][::-1]))
if i==0:
head['ORDER']=((i+1),'Order number')
pyfits.writeto(os.path.splitext(input_file)[0]+'_reduced.fits', data,head, clobber=True)
else:
head['ORDER']=((i+1),'Order number')
pyfits.append(os.path.splitext(input_file)[0]+'_reduced.fits', data,head)
#save file as original file + _reduced
#ie. if aug008811.fits was the file - this becomes
    #aug008811_reduced.fits
hdulist.close()
#string checking for Identify image method
def Anymatching(a,b):
#print a,b
#assumes a,b = list of strings , a string respectively
#(used in header recognition)
c=any(str(k) in str(b) for k in a)
#print c
return c
#use UI to get folder selector output - call this 'filepath' for prepare
def Prepare(filepath):
#Tab1dialogtext.append("Finding fits files under selected folder")
if debug == True:
print "---------------------------------"
print " Getting directory structure"
print "---------------------------------"
os.chdir(filepath)
FITSpaths = open("FITSpaths.txt", "w")
for dirname, dirnames, filenames in os.walk('.'):
# print path to all filenames if FITS.
for filename in filenames:
if '.fits' in filename:
if 'radius_masterflat' not in filename and 'drt' not in filename and '.old' not in filename and 'master' not in filename and 'reduced' not in filename:
pathto = os.path.join(dirname,filename)
if debug == True:
print 'found %s' %pathto
#Tab1dialogtext.append(pathto)
FITSpaths.write(str(pathto) + os.linesep)
FITSpaths.close()
#Tab1dialogtext.append("Found files. Please identify image files.")
#IdentifyImage loads the list made by Prepare and figures out what the thing is
def IdentifyImage():
#reading header fields
filedata={}
filedata['FILE']=[]
keys = ['UTMJD','OBJECT', 'OBSTYPE', 'MEANRA', 'MEANDEC', 'INSTRUME', 'FOCALMOD', 'FIRMVSYS','REDUCEAS']
for i in keys:
filedata[i] = []
with open('FITSpaths.txt', 'r') as f:
global FITSfiles
FITSfiles = [line.strip() for line in f] #make list of FITSpaths
if debug == True:
print "-------------------------------"
print "Getting header information"
print "-------------------------------"
for FITSfile in FITSfiles:
PROBLEM=False
Currentfile = FITSfile
filedata['FILE'].append(Currentfile)
if debug == True:
print "Assessing files %.0f%% : (%s)." %(len(filedata['FILE'])*100/len(FITSfiles),Currentfile)
#Tab1dialogtext.append("Assessing files %.0f%% : (%s)." %( len(filedata['FILE'])*100/len(FITSfiles),Currentfile) )
hdulist=loadfits(Currentfile)
oldheader=hdulist[0].header
#try:
# hdulist = RemoveOverscan(currentfile=hdulist,savename=Currentfile)
#except: pass
try:
for j in keys: #are all headers present?
temp = hdulist[0].header[j]
if j == 'REDUCEAS':
#if hdulist[0].header['REDUCEAS'] in ['SKIP', '','[]']:
FAIL=hdulist[0].header['NON_EXISTENT_HEADER_FIELD']
#go to re-identify if skip
for j in keys: #populate table
filedata[j].append(hdulist[0].header[j])
print'%s file already identified as %s' %(Currentfile, hdulist[0].header['REDUCEAS'])
except:
if hdulist[0].header['NAXIS'] == 0 or hdulist[0].header['NAXIS '] == 0:
METHOD = 'SKIP'
print'Header shows no data - skipping'
hdulist[0].header['REDUCEAS']=(METHOD,'Reduction method to use for frame')
if hdulist[0].header != oldheader:
print'Writing SKIP to header'
hdulist.writeto(FITSfile,clobber=True)
for j in keys:
try:
filedata[j].append(hdulist[0].header[j])
except:
filedata[j].append('null')
elif Anymatching(['AAOMEGA-IFU','CYCLOPS','Cyclops','CYCLOPS2','Cyclops2','TAURUS','Taurus','Taurus '],hdulist[0].header['INSTRUME']):
METHOD = 'SKIP'
print'image is from the %s instrument - will be skipped'%hdulist[0].header['INSTRUME']
hdulist[0].header['REDUCEAS']=(METHOD,'Reduction method to use for frame')
if hdulist[0].header != oldheader:
print'Writing SKIP to header'
hdulist.writeto(FITSfile,clobber=True)
for j in keys:
try:
filedata[j].append(hdulist[0].header[j])
except:
filedata[j].append('null')
elif Anymatching(['nulnd','Nulnd','NuInd','nuind','test','Test','tests','RUN','run','PellicleTests','focus','FOCUS','Focus','dummy','DUMMY'],hdulist[0].header['OBJECT']):
METHOD = 'SKIP'
                print'image is a %s - will be skipped'%hdulist[0].header['OBJECT']
hdulist[0].header['REDUCEAS']=(METHOD,'Reduction method to use for frame')
if hdulist[0].header != oldheader:
print'Writing SKIP to header'
hdulist.writeto(FITSfile,clobber=True)
for j in keys:
try:
filedata[j].append(hdulist[0].header[j])
except:
filedata[j].append('null')
else:
for j in keys:
try:
if j=='REDUCEAS':
FAIL=hdulist[0].header['NON_EXISTENT_HEADER_FIELD']
temp = hdulist[0].header[j]
#if header is here then
filedata[j].append(temp)
                    #the header keyword doesn't exist; this happens for a variety of reasons
except:
if j in ['FIRMVSYS']:
try:
temp = hdulist[0].header['CCD_VER']
if debug == True:
print " FIRMVSYS missing - used CCD_VER"
#Tab1dialogtext.append("FIRMVSYS missing - used CCD_VER")
hdulist[0].header[j]=(temp,'Duplicated from CCD_VER')
except:
try:
temp = hdulist[0].header['DETECTOR']
if debug == True:
print "%s missing - used DETECTOR"%j
#Tab1dialogtext.append("FIRMVSYS, CCD_VER missing - used DETECTOR")
hdulist[0].header[j]=(temp,'Duplicated from DETECTOR')
except:
temp='null'
PROBLEM=True
filedata[j].append(temp)
elif j in ['UTMJD']:
print'UTMJD not found'
try:
#construct MJD from UTSTART + 1/2 * TOTALEXP
tempdate = hdulist[0].header['UTDATE']
exptime = hdulist[0].header['TOTALEXP']
exptime_days = exptime/60/60/24
tempday = (datetime.strptime(tempdate, "%Y:%m:%d")).strftime('%Y-%m-%d')
temptime = hdulist[0].header['UTSTART']
times = tempday + " " + temptime
t = Time(times, scale='utc')
newmjd = t.mjd + exptime_days
if debug == True:
print " UTMJD missing - calculated new UTMJD from UTSTART and TOTALEXP = %r" % newmjd
#Tab1dialogtext.append("UTMJD missing - calculated new UTMJD from UTSTART and TOTALEXP = %r" %newmjd)
hdulist[0].header[j]=(newmjd,'UTMJD calculated from UTSTART and TOTALEXP')
print'UTMJD created'
except KeyError:
try:
#construct MJD from UTSTART & UTEND
tempdate = hdulist[0].header['UTDATE']
tempday = datetime.strptime(tempdate, "%Y:%m:%d").strftime('%Y-%m-%d')
temptime = hdulist[0].header['UTSTART']
times = tempday + " " + temptime
t = Time(times, scale='utc')
newmjdstart = t.mjd
temptime = hdulist[0].header['UTEND']
times = tempday + " " + temptime
t = Time(times, scale='utc')
newmjdend = t.mjd
t=[newmjdstart, newmjdend]
newmjd=np.mean(t)
if debug == True:
print " UTMJD missing - calculated new UTMJD from UTSTART and UTEND = %r" %newmjd
#Tab1dialogtext.append("UTMJD missing - calculated new UTMJD form UTSTART and UTEND = %r" %newmjd)
hdulist[0].header[j]=(newmjd,'UTMJD calculated from UTSTART and UTEND')
print'UTMJD created'
except KeyError:
newmjd="null"
if debug == True:
print " UTMJD missing - unable to create one"
#Tab1dialogtext.append("UTMJD missing -unable to create one ")
#Tab1dialogtext.append("Strongly suggest SKIP as this header is malformed")
filedata[j].append(newmjd)
elif j in ['MEANRA', 'MEANDEC']:
try:
#record tracking - likely parked at zenith
temp = hdulist[0].header['TRACKING']
if debug == True:
print " %s missing - used TRACKING" % j
#Tab1dialogtext.append("No MEANRA, MEANDEC - used TRACKING header")
hdulist[0].header[j]=(temp,'Duplicated from TRACKING')
print'TRACKING used'
except KeyError:
if debug == True:
print " %s missing - null entry" % j
#Tab1dialogtext.append("No MEANRA, MEANDEC, nor TRACKING recorded")
#Tab1dialogtext.append("This is a problem if a stellar image")
temp='null'
PROBLEM = True
filedata[j].append(temp)
elif j in ['FOCALMOD','OBSTYPE']:
print '%s absent from header'%j
#might be old
if debug == True:
print" %s missing"%j
try:
temp=hdulist[0].header['DETECTOR']
#if temp in ['CCD_2','MITLL2','TEKTRONIX','MITLL2A','MITLL2a','EEV2','EEV2 ','MITLL3','TEKTRONIX_5']:
hdulist[0].header[j]=(temp,'Duplicated from DETECTOR')
print 'detector is %s, used for %s header field' % (temp, j)
except:
try:
temp=hdulist[0].header['INSTRUME']
#if temp in ['CCD_2','MITLL2','TEKTRONIX','MITLL2A','MITLL2a','EEV2','EEV2 ','MITLL3','TEKTRONIX_5']:
hdulist[0].header[j]=(temp,'Duplicated from INSTRUME')
print 'instrument is %s, used for %s header field' % (temp, j)
except:
print 'detector not recognised, that is not ideal'
temp='null'
#PROBLEM=True
filedata[j].append(temp)
elif j !='REDUCEAS':#damned typo!!!!
#problem file - go to user id mode
if debug == True:
print " %s missing - null entry" % j
filedata[j].append('null')
#Tab1dialogtext.append("Something is missing from the header of this file")
#Tab1dialogtext.append("Waiting for user identification of this image type")
PROBLEM = True
print "%s not found in header of %s"%(j,Currentfile)
#Choosing how reduction treats each frame. Note old (1998) spectra have no FOCALMOD, OBSTYPE - deal with it
try:
if Anymatching(['bias', 'BIAS', 'Bias','BIAS_0001'],hdulist[0].header['OBJECT']) and hdulist[0].header['TOTALEXP']== 0:
METHOD = 'BIAS'
elif Anymatching(['null','Null','NULL'],hdulist[0].header['UTMJD']):
METHOD='SKIP'
elif Anymatching(['FLAT', 'Flat', 'flat','wideflat','WIDEFLAT','Wdieflat','wdieflat','Flat sl=6arcsec'],hdulist[0].header['OBSTYPE'])and Anymatching(['CLEAR', 'Clear' ,'clear'],hdulist[0].header['FOCALMOD'])and Anymatching(['WIDEFLAT', 'WideFlat', 'wideflat', 'Wideflat','Flat sl=6arcsec','wide','wdie','WIDE','WDIE'],hdulist[0].header['OBJECT']):
METHOD = 'WIDEFLAT'
elif Anymatching(['iodine', 'IODINE', 'Iodine','I2','Iodine 0.5arcsec','Iodine '],hdulist[0].header['OBJECT']) and Anymatching(['Iodine ','iodine', 'IODINE', 'Iodine','I2','Iodine 0.5arcsec'],hdulist[0].header['FOCALMOD']) and Anymatching(['FLAT', 'Flat', 'flat'], hdulist[0].header['OBSTYPE']):
METHOD = 'I2FLAT'
elif Anymatching(['FLAT', 'Flat', 'flat','narrowflat','NARROWFLAT','narrow','Narrow','NARROW'],hdulist[0].header['OBSTYPE']) and Anymatching(['CLEAR', 'Clear' ,'clear'],hdulist[0].header['FOCALMOD'])and Anymatching(['NARROWFLAT', 'NarrowFlat', 'narrowflat', 'Narrowflat','Narrow','NARROW','narrow'],hdulist[0].header['OBJECT']) :
METHOD = 'NARROWFLAT'
elif Anymatching(['ARC', 'Arc', 'arc','THAR','thar','ThAr','Thar','ThAr0.5px','ThAr1.0px','ThAr 0.5pix+cell'],hdulist[0].header['OBJECT']) and Anymatching(['ThAr', 'THAR', 'Thorium', 'thar','ARC', 'Arc', 'arc','ThAr0.5px','ThAr1.0px','ThAr 0.5pix+cell'],hdulist[0].header['OBSTYPE']):
METHOD = 'ARC' #problems identifying arc in str(arcsec)
# many of these lack additional checks apart from just OBJECT - may need some stat measure and/or ECHGAMMA, ECHTHETA and != 31 l/mm ??.
elif Anymatching(['WIDEFLAT', 'WideFlat', 'wideflat', 'Wideflat','wide','WIDE','Wide','FibFlat','SlitFlat','Slitflat','Wdieflat','wdieflat'],hdulist[0].header['OBJECT']) :
METHOD = 'WIDEFLAT'
elif Anymatching(['iodine', 'IODINE', 'Iodine','Iflat','I2flat','Iflat','IFLAT','I2','Iodine 0.5arcsec','Iodine '],hdulist[0].header['OBJECT']):
METHOD = 'I2FLAT'
elif Anymatching(['NARROWFLAT', 'NarrowFlat', 'narrowflat', 'Narrowflat'],hdulist[0].header['OBJECT']):
METHOD = 'NARROWFLAT'
elif ((Anymatching(['iodine', 'IODINE', 'Iodine'],hdulist[0].header['FOCALMOD']) and hdulist[0].header['TOTALEXP']!= 0) or hdulist[0].header['OBJECT'].isdigit()) and not Anymatching(['flat','Flat','arc','ARC','Thar','THAR','quatrz','QUARTZ', 'RUN','run','Run','Test','test'],hdulist[0].header['OBJECT']):
METHOD = 'SCIENCE'
elif Anymatching(['DARK', 'Dark', 'dark'],hdulist[0].header['OBJECT']):
METHOD = 'DARK'
#print 'Goodness you found a dark image - how rare'
elif Anymatching(['bias', 'BIAS', 'Bias','BIAS_0001'],hdulist[0].header['OBJECT']) :
METHOD = 'BIAS'
elif Anymatching(['ARC', 'Arc', 'arc','THAR','thar','ThAr','Thar','ThAr0.5px','ThAr1.0px','ThAr 0.5pix+cell'],hdulist[0].header['OBJECT']) :
METHOD = 'ARC'
elif hdulist[0].header['TOTALEXP']!= 0 and Anymatching(['HD','hd','Hd','HR','hr','Hr'],hdulist[0].header['OBJECT']) :
METHOD = 'SCIENCE' #RISKY!
elif hdulist[0].header['OBJECT']=='IODINE':
METHOD = 'I2FLAT'
elif hdulist[0].header['OBJECT']=='THAR':
METHOD = 'ARC'
elif hdulist[0].header['OBJECT']=='WIDEFLAT':
METHOD = 'WIDEFLAT'
elif hdulist[0].header['OBJECT'].isdigit():
METHOD = 'SCIENCE'
else: METHOD='SKIP'
hdulist[0].header['REDUCEAS']=(METHOD,'Reduction method to use for frame')
except:
PROBLEM = True
print "Identification problem in %s" %Currentfile
METHOD='SKIP'
hdulist[0].header['REDUCEAS']=(METHOD,'Reduction method to use for frame')
if PROBLEM ==False:
if METHOD!='SCIENCE':
print "%s file identified as %s" %(Currentfile, METHOD)
else:
print "%s file identified as %s with object %s" %(Currentfile, METHOD, hdulist[0].header['OBJECT'])
#Tab1dialogtext.append("%s file identified as %s" %(Currentfile, METHOD))
else:
if debug == True:
Tab1plots(Currentfile)
break
else:
METHOD = 'SKIP'
print "%s file not identified: to skip" %(Currentfile)
#Tab1dialogtext.append("%s file not identified: %s" %(Currentfile, METHOD))
hdulist[0].header['REDUCEAS']=(METHOD,'Reduction method to use for frame')
if hdulist[0].header['REDUCEAS'] != oldheader['REDUCEAS']:
print'Writing file changes'
hdulist.writeto(FITSfile,clobber=True)
#del hdulist #if memory woes
print "Percent done: %s"%(len(filedata['FILE'])*100/len(FITSfiles))
filedata['REDUCEAS'].append(METHOD)
if len(filedata['REDUCEAS'])!=len(filedata['UTMJD']):
for j in keys:
print j,hdulist[0].header[j]
print"***** problem with %s, reduction methods allocated not same as UTMJDs found"%Currentfile
#np.save('data_array', filedata)
ascii.write(filedata, "Inventoryspectra.dat")
np.save('filedata',filedata)
return filedata
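# Illustrative sketch (not part of the original pipeline): the inventory returned
# above is a dict of parallel lists keyed by header keyword, so pulling out every
# file of one reduction category is a simple index scan. The helper name below is
# hypothetical and only shows how 'REDUCEAS' and 'FILE' line up.
def select_files_by_method(filedata, method):
    """Return the filenames whose 'REDUCEAS' entry matches method (e.g. 'BIAS')."""
    return [filedata['FILE'][i]
            for i in range(len(filedata['REDUCEAS']))
            if filedata['REDUCEAS'][i] == method]
#e.g. biasfiles = select_files_by_method(filedata, 'BIAS')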
#master image methods
def make_master_bias(filedata):
#filedata=np.load('filedata.npy')
biasfiledata={} #preset structure
keys = ['UTMJD','OBJECT', 'OBSTYPE', 'MEANRA', 'MEANDEC', 'INSTRUME', 'FOCALMOD', 'FIRMVSYS','REDUCEAS','FILE']
for i in keys:
biasfiledata[i] = []
#global filedata
for j in range(len( filedata['REDUCEAS'] )):
if filedata['REDUCEAS'][j] == 'BIAS':
for k in keys:
biasfiledata[k].append(filedata[k][j])
if len(biasfiledata['FILE'])%2==0: #if even number of bias files
for k in keys:
biasfiledata[k] = biasfiledata[k][1:] #drop the first entry so the median uses an odd number of frames
biasfiles=biasfiledata['FILE'] #this is a list of filenames
if len(biasfiles)<=1:
print"Error: Too few identified bias files"
#Tab2dialogtext.append("Unable to make master bias: Too few identifed bias files!")
else:
biasdata={} #preset structure
print "loading %.0f bias image arrays" % len(biasfiles)
for f in biasfiles: #for each filename
biasdata[f] = pyfits.getdata(f) #use filename as header and read data as array for each header
print "making master bias image - this can take a little time"
medianbias=np.median(biasdata.values(),axis=0) #take median of all the data arrays
print "done making master bias - saving and plotting for inspection"
hdu=pyfits.PrimaryHDU(medianbias)
hdu.header.add_comment("Master bias constructed from median of %.0f bias images" % len(biasfiles))
meanut= np.mean(biasfiledata['UTMJD'])
hdu.header['UTMJD']=(meanut,'mean UTMJD of raw bias images used')
minut=min(biasfiledata['UTMJD'])
maxut=max(biasfiledata['UTMJD'])
hdu.header['MINUTMJD']=(minut,'minimum UTMJD of raw bias images used')
hdu.header['MAXUTMJD']=(maxut,'maximum UTMJD of raw bias images used')
headget=pyfits.getheader(biasfiles[0])
try:
hdu.header['FOCALMOD']=headget['FOCALMOD']
except:
hdu.header['FOCALMOD']=headget['OBSTYPE']
hdulist = pyfits.HDUList([hdu])
hdulist.writeto("radius_masterbias.fits", clobber=True)
hdulist.close()
def make_master_flat(filedata):
#filedata=np.load('filedata.npy')
flatfiledata={} #preset structure
keys = ['UTMJD','OBJECT', 'OBSTYPE', 'MEANRA', 'MEANDEC', 'INSTRUME', 'FOCALMOD', 'FIRMVSYS','REDUCEAS','FILE']
for i in keys:
flatfiledata[i] = []
#global filedata
for j in range(len( filedata['REDUCEAS'] )):
if filedata['REDUCEAS'][j] =='WIDEFLAT':
for k in keys:
flatfiledata[k].append(filedata[k][j])
if len(flatfiledata['FILE'])%2==0: #if even number of flat files
for k in keys:
flatfiledata[k] = flatfiledata[k][1:] #drop the first entry so the median uses an odd number of frames
flatfiles=flatfiledata['FILE'] #this is a list of filenames
if len(flatfiles)<=1:
print"Error: Too few identified flat files"
#Tab2dialogtext.append("Unable to make master flat: Too few identifed flat files!")
else:
flatdata={} #preset structure
print "loading %s flat image arrays" % len(flatfiles)
for f in flatfiles: #for each filename
flatdata[f] = pyfits.getdata(f) #use filename as header and read data as array for each header
print "making master flat image - this can take a little time"
medianflat=np.median(flatdata.values(),axis=0) #take median of all the data arrays
print "done making master flat - saving and plotting for inspection"
hdu=pyfits.PrimaryHDU(medianflat)
hdu.header.add_comment("Master flat constructed from median of %.0f flat images" % len(flatfiles))
meanut= np.mean(flatfiledata['UTMJD'])
hdu.header['UTMJD']=(meanut,'mean UTMJD of raw flat images used')
minut=min(flatfiledata['UTMJD'])
maxut=max(flatfiledata['UTMJD'])
hdu.header['MINUTMJD']=(minut,'minimum UTMJD of raw flat images used')
hdu.header['MAXUTMJD']=(maxut,'maximum UTMJD of raw flat images used')
headget=pyfits.getheader(flatfiles[0])
try:
hdu.header['FOCALMOD']=headget['FOCALMOD']
except:
hdu.header['FOCALMOD']=headget['DETECTOR']
#need gain and ro_noise too
try:
hdu.header['RO_GAIN']=headget['RO_GAIN']
except:
try:
hdu.header['RO_GAIN']=headget['GAIN']
except: pass
try:
hdu.header['RO_NOISE']=headget['RO_NOISE']
except:
try:
hdu.header['RO_NOISE']=headget['NOISE']
except: pass
hdulist = pyfits.HDUList([hdu])
hdulist.writeto("radius_masterflat.fits", clobber=True)
hdulist.close()
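# Sketch only (an assumption, not code used by the original functions): the nested
# try/except fallbacks above for FOCALMOD, RO_GAIN and RO_NOISE all follow the same
# "first keyword that exists wins" pattern, which could be written once as:
def first_existing_key(header, keywords, default=None):
    """Return the value of the first keyword present in a pyfits/astropy header."""
    for key in keywords:
        if key in header:
            return header[key]
    return default
#e.g. gain = first_existing_key(headget, ['RO_GAIN', 'GAIN'])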
###sequential reduction
def reducebysubfolder(filepath):
initial_path= os.getcwd()
for f in os.listdir(filepath):
print "Current working directory is %s" % initial_path
child=os.path.join(filepath,f)
if os.path.isdir(child):
print f
print'*** Beginning reduction of files within %s ***'%child
Prepare(child)
print'*** Identifying files ***'
filedata=IdentifyImage()
#filedata=np.load('filedata.npy')
print'*** Making master templates ***'
make_master_bias(filedata=filedata) #likely to fail as very few bias images
make_master_flat(filedata=filedata) #uses filelist and 'REDUCEAS' categorisation
trace_orders() #uses saved master flat, saves orderparameters to file
print'*** Reducing Master Flat ***'
blazefunctions,var,normed_flat,mask,slitfuncmodel=extract_and_normalise_flat()
#save_reduced_data('radius_masterflat.fits',blazefunctions,var)
print'*** Reducing Thoriums ***'
for j in range(len( filedata['REDUCEAS'] )):
if filedata['REDUCEAS'][j] =='ARC':
input_file=filedata['FILE'][j]
print 'Processing %s'%input_file
try:
sp,unc=extract_arc(input_file=input_file)
wave=arc_wavelength_solution(hdu)
save_reduced_data(input_file,sp,unc,wave)
except: print 'Extraction FAILED for %s' %input_file
print'*** Reducing Iodine Flats ***'
for j in range(len( filedata['REDUCEAS'] )):
if filedata['REDUCEAS'][j] =='I2FLAT':
input_file=filedata['FILE'][j]
print 'Processing %s'%input_file
try:
sp,unc=extract_science_or_I2flat(input_file=input_file)
save_reduced_data(input_file,sp,unc)
except: print 'Extraction FAILED for %s' %input_file
print'*** Reducing Stellar Exposures ***'
for j in range(len( filedata['REDUCEAS'] )):
if filedata['REDUCEAS'][j] =='SCIENCE':
input_file=filedata['FILE'][j]
print 'Processing %s'%input_file
try:
sp,unc=extract_science_or_I2flat(input_file=input_file)
save_reduced_data(input_file,sp,unc)
except: print 'Extraction FAILED for %s' %input_file
print'*** Reduction in %s completed ***'%child
os.chdir(initial_path)
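# Usage sketch (assumption - the original driver code for this function is not shown
# here): run the per-night reduction over a top-level folder of nightly sub-folders,
# e.g. `python reduce.py /path/to/raw/nights` (script name and path are hypothetical).
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        reducebysubfolder(sys.argv[1])
    else:
        print "usage: python reduce.py <folder containing nightly sub-folders>"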
|
|
"""
Query and response objects for the Meduza protocol.
** * * WARNING * * **
Note that these objects are encoded to BSON and the server decodes them and expects them to have certain fields
in a very specific naming. So DO NOT ALTER these objects - not field names, not object names, not removing fields -
without knowing what you're doing and modifying the server code accordingly.
** * * /WARNING * * **
"""
import time
import datetime
class Condition(object):
"""
Selection condition constants for use when constructing filters
"""
IN = "IN"
EQ = "="
GT = ">"
LT = "<"
ALL = "ALL"
def nanoseconds(seconds):
return long(seconds * 1000000000)
class Entity(object):
"""
An entity represents a stored object in its raw, schemaless form.
You can work with entities directly, but for most cases you're better off mapping them to real objects
"""
ID = 'Id'
def __init__(self, _key, **properties):
self.id = _key
self.properties = properties
self.ttl = 0
def __repr__(self):
return 'Entity<%s>: %s' % (self.id, self.properties)
def expire(self, seconds):
if seconds > 0:
# convert the TTL to nanoseconds, which is how go serializes time durations
self.ttl = nanoseconds(seconds)
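# Illustrative sketch (not part of the original module): a raw entity is just an id
# plus a property dict; expire() stores the TTL in nanoseconds because that is how
# the Go server serializes time durations. The sample values below are made up.
def _example_entity():
    user = Entity('user:1', name='elmer', email='elmer@example.com')
    user.expire(3600)  # user.ttl is now 3600 * 10**9 nanoseconds
    return user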
class Response(object):
"""
The base class for all responses. Includes the query processing time on the server, and the error returned from it
"""
def __init__(self, **kwargs):
self.error = kwargs.get('error', None)
self.time = kwargs.get('time')
class Filter(object):
"""
A query selection filter, used to select objects for loading or deletion
"""
def __init__(self, property, op, *values):
self.property = property
self.op = op
self.values = values
def __repr__(self):
return "Filter{%s %s %s}" % (self.property, self.op, self.values)
def __and__(self, other):
if isinstance(other, (list, tuple)):
return (self,) + other
return tuple((self, other))
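# Usage sketch (not in the original module): Condition constants and the overloaded
# `&` operator pack individual filters into the tuples that queries expect.
def _example_filter_combination():
    # "age > 18 AND country = 'IL'" as a tuple of Filter objects
    return Filter('age', Condition.GT, 18) & Filter('country', Condition.EQ, 'IL')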
class Ordering(object):
"""
Representing the sort order of a query
"""
ASC = 'ASC'
DESC = 'DESC'
def __init__(self, by, mode=ASC):
self.by = by
self.asc = mode == Ordering.ASC
@classmethod
def asc(cls, by):
return Ordering(by, cls.ASC)
@classmethod
def desc(cls, by):
return Ordering(by, cls.DESC)
class Paging(object):
"""
Paging represents the paging limitations of a selection query
"""
def __init__(self, offset=0, limit=100):
self.offset = offset
self.limit = limit
def Filters(*filters):
ret = {}
for flt in filters:
ret[flt.property] = flt
return ret
class GetQuery(object):
"""
GetQuery encodes the parameters to get objects from the server
"""
def __init__(self, table, properties = tuple(), filters=tuple(), order=None, paging=None):
self.table = table
self.properties = list(properties)
self.filters = Filters(*filters)
self.order = order
self.paging =paging or Paging()
def filter(self, prop, condition, *values):
"""
Adds a filter (WHERE <prop> <condition> <values...>) to the query
:param prop: property name for filtering
:param condition: IN/=/LT/GT/...
:param values: filtered values
:return: the query object itself for builder-style syntax
"""
self.filters[prop] = Filter(prop, condition, *values)
return self
def all(self):
"""
Add a special filter to page on all ids with a certain paging
:return:
"""
self.filters[Entity.ID] = Condition.ALL
def limit(self, limit):
"""
Set limit on the first N records for this query. We assume offset 0
:return: the query object itself for builder-style syntax
"""
self.paging = Paging(0, limit)
return self
def page(self, offset, limit):
"""
Set more complex paging offsets
:param offset: where to begin fetching objects at
:param limit: number of objects to fetch
:return: the query object itself for builder-style syntax
"""
if offset >= limit or offset < 0 or limit <= 0:
raise ValueError("Invalid offset/limit: {}-{}".format(offset,limit))
self.paging= Paging(offset,limit)
return self
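# Builder-style usage sketch (assumption: the client/transport layer that would
# actually BSON-encode and send this query is not part of this module).
def _example_get_query():
    q = GetQuery('users', properties=('name', 'email'))
    q.filter('name', Condition.EQ, 'elmer').limit(10)
    return q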
class GetResponse(Response):
"""
GetResponse is a response to a Get query, with the selected entities embedded in it
"""
def __init__(self, **kwargs):
Response.__init__(self, **kwargs['Response'])
self.entities = [Entity(e['id'], **e['properties']) for e in kwargs.get('entities', [])]
self.total = kwargs.get('total', 0)
def load(self, model):
ret = []
for e in self.entities:
ret.append(model.decode(e))
return ret
def loadOne(self, model):
if len(self.entities) == 0:
return None
return model.decode(self.entities[0])
class PutQuery(object):
"""
PutQuery is a batch insert/update query, pushing multiple objects at once.
It's the fastest way to create multiple objects at once, and can create hundreds of objects in a single go
"""
def __init__(self, table, *entities):
self.table = table
self.entities = list(entities)
def add(self, entity):
"""
Add an entity to be sent to the server
:param entity: an entity object. ** It can (and should) have an empty id if you're inserting **
:return: the query object itself for builder-style syntax
"""
self.entities.append(entity)
return self
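# Usage sketch (not in the original module): batch several new entities into one PUT;
# ids are left empty so the server assigns them on insert.
def _example_put_query():
    q = PutQuery('users')
    for name in ('alice', 'bob'):
        q.add(Entity('', name=name))
    return q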
class PutResponse(Response):
"""
PutResponse represents the response from a PUT query.
It holds the ids of the put objects, whether they were new inserts or just updates.
"""
def __init__(self, **kwargs):
Response.__init__(self, **kwargs['Response'])
self.ids = kwargs.get('ids', [])
def __repr__(self):
return 'PutResponse%s' % self.__dict__
class DelQuery(object):
"""
DelQuery sets filters telling the server what objects to delete. It returns the number of objects deleted
"""
def __init__(self, table, *filters):
self.table = table
self.filters = Filters(*filters)
def filter(self, prop, op, *values):
self.filters[prop] = Filter(prop, op, *values)
return self
class DelResponse(Response):
def __init__(self, **kwargs):
Response.__init__(self, **kwargs.get('Response', {}))
self.num = kwargs.get('num', 0)
class Change(object):
Set = "SET"
Del = "DEL"
Increment = "INCR"
SetAdd = "SADD"
SetDel = "SDEL"
MapSet = "MSET"
MapDel = "MDEL"
Expire = "EXP"
DelProperty = "PDEL"
_supported = {Set, Increment, Expire, DelProperty}
def __init__(self, property, op, value):
if op not in self._supported:
raise ValueError("op %s not supported", op)
self.property = property
self.op = op
self.value = value
@classmethod
def set(cls, prop, val):
"""
Create a SET change
"""
return Change(prop, cls.Set, val)
@classmethod
def expire(cls, seconds):
"""
Expire entity(ies) in T seconds
"""
return Change("", cls.Expire, nanoseconds(seconds))
@classmethod
def delProperty(cls, prop):
"""
Delete one property from an entity by its name
"""
return Change( prop, cls.DelProperty, "")
class UpdateQuery(object):
"""
UpdateQuery sets filters telling the server which objects to update, and the changes to apply to them. It returns the number of objects updated
"""
def __init__(self, table, filters, *changes):
self.table = table
self.filters = Filters(*filters)
self.changes = list(changes)
def filter(self, prop, operator, *values):
"""
Add an extra selection filter for what objects to update
:param prop: the name of the filtered property
:param operator: the filter operator (equals, gt, in, etc)
:param values: the values for selection (e.g. "id" "in" 1,2,34)
:return: the update query itself
"""
self.filters[prop] = Filter(prop, operator, *values)
return self
def set(self, prop, val):
"""
Add another SET change of a property to the query
:param prop: the name of the changed property
:param val: the changed value
:return: the update query object itself
"""
self.changes.append(Change.set(prop, val))
return self
def expire(self, seconds):
"""
Expire the selected entities in T seconds
:param seconds:
:return:
"""
self.changes.append(Change.expire(seconds))
return self
def delProperties(self, *properties):
"""
Delete whole properties from the selected entities
:param properties: the property names to delete, at least one
"""
for prop in properties:
self.changes.append(Change.delProperty(prop))
return self
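# Usage sketch (not in the original module): select objects by id and chain changes.
def _example_update_query():
    q = UpdateQuery('users', (Filter(Entity.ID, Condition.IN, 'user:1', 'user:2'),))
    q.set('status', 'active').expire(3600).delProperties('tmp_token')
    return q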
class UpdateResponse(Response):
def __init__(self, **kwargs):
Response.__init__(self, **kwargs.get('Response', {}))
self.num = kwargs.get('num', 0)
class PingQuery(object):
def __init__(self):
pass
class PingResponse(Response):
pass
|
|
# -*- coding: utf-8 -*-
"""
Tests that apply specifically to the CParser. Unless specifically stated
as a CParser-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the Python parser can accept
further arguments when parsing.
"""
import os
import sys
import tarfile
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import DataFrame
from pandas.compat import StringIO, range, lrange
class CParserTests(object):
def test_buffer_overflow(self):
# see gh-9205: test certain malformed input files that cause
# buffer overflows in tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
cperr = 'Buffer overflow caught - possible malformed input file.'
for malf in (malfw, malfs, malfl):
try:
self.read_table(StringIO(malf))
except Exception as err:
assert cperr in str(err)
def test_buffer_rd_bytes(self):
# see gh-12098: src->buffer in the C parser can be freed twice leading
# to a segfault if a corrupt gzip file is read with 'read_csv' and the
# buffer is filled more than once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception:
pass
def test_delim_whitespace_custom_terminator(self):
# See gh-12912
data = """a b c~1 2 3~4 5 6~7 8 9"""
df = self.read_csv(StringIO(data), lineterminator='~',
delim_whitespace=True)
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, expected)
def test_dtype_and_names_error(self):
# see gh-8833: passing both dtype and names
# resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep=r'\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep=r'\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep=r'\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]],
columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assert_raises_regex(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep=r'\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_unsupported_dtype(self):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__unsupported_dtype__.csv') as path:
df.to_csv(path)
# valid but we don't support it (date)
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# valid but unsupported - fixed width unicode string
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'U8'},
index_col=0)
@td.skip_if_32bit
def test_precise_conversion(self):
from decimal import Decimal
normal_errors = []
precise_errors = []
# test numbers between 1 and 2
for num in np.linspace(1., 2., num=500):
# 25 decimal digits of precision
text = 'a\n{0:.25}'.format(num)
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
assert roundtrip_val == float(text[2:])
assert sum(precise_errors) <= sum(normal_errors)
assert max(precise_errors) <= max(normal_errors)
def test_usecols_dtypes(self):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
result2 = self.read_csv(StringIO(data), usecols=(0, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
assert (result.dtypes == [object, np.int, np.float]).all()
assert (result2.dtypes == [object, np.float]).all()
def test_disable_bool_parsing(self):
# #2090
data = """A,B,C
Yes,No,Yes
No,Yes,Yes
Yes,,Yes
No,No,No"""
result = self.read_csv(StringIO(data), dtype=object)
assert (result.dtypes == object).all()
result = self.read_csv(StringIO(data), dtype=object, na_filter=False)
assert result['B'][2] == ''
def test_custom_lineterminator(self):
data = 'a,b,c~1,2,3~4,5,6'
result = self.read_csv(StringIO(data), lineterminator='~')
expected = self.read_csv(StringIO(data.replace('~', '\n')))
tm.assert_frame_equal(result, expected)
def test_parse_ragged_csv(self):
data = """1,2,3
1,2,3,4
1,2,3,4,5
1,2
1,2,3,4"""
nice_data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
result = self.read_csv(StringIO(data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
expected = self.read_csv(StringIO(nice_data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
# too many columns, cause segfault if not careful
data = "1,2\n3,4,5"
result = self.read_csv(StringIO(data), header=None,
names=lrange(50))
expected = self.read_csv(StringIO(data), header=None,
names=lrange(3)).reindex(columns=lrange(50))
tm.assert_frame_equal(result, expected)
def test_tokenize_CR_with_quoting(self):
# see gh-3453
data = ' a,b,c\r"a,b","e,d","f,f"'
result = self.read_csv(StringIO(data), header=None)
expected = self.read_csv(StringIO(data.replace('\r', '\n')),
header=None)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data))
expected = self.read_csv(StringIO(data.replace('\r', '\n')))
tm.assert_frame_equal(result, expected)
def test_grow_boundary_at_cap(self):
# See gh-12494
#
# Cause of error was that the C parser
# was not increasing the buffer size when
# the desired space would fill the buffer
# to capacity, which would later cause a
# buffer overflow error when checking the
# EOF terminator of the CSV stream
def test_empty_header_read(count):
s = StringIO(',' * count)
expected = DataFrame(columns=[
'Unnamed: {i}'.format(i=i)
for i in range(count + 1)])
df = self.read_csv(s)
tm.assert_frame_equal(df, expected)
for count in range(1, 101):
test_empty_header_read(count)
def test_parse_trim_buffers(self):
# This test is part of a bugfix for issue #13703. It attempts
# to stress the system memory allocator, to cause it to move the
# stream buffer and either let the OS reclaim the region, or let
# other memory requests of parser otherwise modify the contents
# of memory space, where it was formerly located.
# This test is designed to cause a `segfault` with unpatched
# `tokenizer.c`. Sometimes the test fails on `segfault`, other
# times it fails due to memory corruption, which causes the
# loaded DataFrame to differ from the expected one.
# Generate a large mixed-type CSV file on-the-fly (one record is
# approx 1.5KiB).
record_ = \
"""9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z""" \
"""ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,""" \
"""ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9""" \
"""99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,""" \
"""9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9.""" \
"""99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999.""" \
"""99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ""" \
"""ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ""" \
"""ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z""" \
"""ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,""" \
"""9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,""" \
"""999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,""" \
""",,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999""" \
""",9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9.""" \
"""999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,""" \
""",9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z""" \
"""ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ""" \
""",999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99""" \
""",,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-""" \
"""9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9""" \
""".99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,""" \
""",,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9.""" \
"""99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ""" \
"""ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ""" \
"""-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ""" \
"""ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ""" \
""",9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99""" \
""",99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9""" \
""".99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,"""
# Set the number of lines so that a call to `parser_trim_buffers`
# is triggered: after a couple of full chunks are consumed a
# relatively small 'residual' chunk would cause reallocation
# within the parser.
chunksize, n_lines = 128, 2 * 128 + 15
csv_data = "\n".join([record_] * n_lines) + "\n"
# We will use StringIO to load the CSV from this text buffer.
# pd.read_csv() will iterate over the file in chunks and will
# finally read a residual chunk of really small size.
# Generate the expected output: manually create the dataframe
# by splitting by comma and repeating the `n_lines` times.
row = tuple(val_ if val_ else np.nan
for val_ in record_.split(","))
expected = pd.DataFrame([row for _ in range(n_lines)],
dtype=object, columns=None, index=None)
# Iterate over the CSV file in chunks of `chunksize` lines
chunks_ = self.read_csv(StringIO(csv_data), header=None,
dtype=object, chunksize=chunksize)
result = pd.concat(chunks_, axis=0, ignore_index=True)
# Check for data corruption if there was no segfault
tm.assert_frame_equal(result, expected)
# This extra test was added to replicate the fault in gh-5291.
# Force 'utf-8' encoding, so that `_string_convert` would take
# a different execution branch.
chunks_ = self.read_csv(StringIO(csv_data), header=None,
dtype=object, chunksize=chunksize,
encoding='utf_8')
result = pd.concat(chunks_, axis=0, ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_internal_null_byte(self):
# see gh-14012
#
# The null byte ('\x00') should not be used as a
# true line terminator, escape character, or comment
# character, only as a placeholder to indicate that
# none was specified.
#
# This test should be moved to common.py ONLY when
# Python's csv class supports parsing '\x00'.
names = ['a', 'b', 'c']
data = "1,2,3\n4,\x00,6\n7,8,9"
expected = pd.DataFrame([[1, 2.0, 3], [4, np.nan, 6],
[7, 8, 9]], columns=names)
result = self.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(result, expected)
def test_read_nrows_large(self):
# gh-7626 - Read only nrows of data in for large inputs (>262144b)
header_narrow = '\t'.join(['COL_HEADER_' + str(i)
for i in range(10)]) + '\n'
data_narrow = '\t'.join(['somedatasomedatasomedata1'
for i in range(10)]) + '\n'
header_wide = '\t'.join(['COL_HEADER_' + str(i)
for i in range(15)]) + '\n'
data_wide = '\t'.join(['somedatasomedatasomedata2'
for i in range(15)]) + '\n'
test_input = (header_narrow + data_narrow * 1050 +
header_wide + data_wide * 2)
df = self.read_csv(StringIO(test_input), sep='\t', nrows=1010)
assert df.size == 1010 * 10
def test_float_precision_round_trip_with_text(self):
# gh-15140 - This should not segfault on Python 2.7+
df = self.read_csv(StringIO('a'),
float_precision='round_trip',
header=None)
tm.assert_frame_equal(df, DataFrame({0: ['a']}))
def test_large_difference_in_columns(self):
# gh-14125
count = 10000
large_row = ('X,' * count)[:-1] + '\n'
normal_row = 'XXXXXX XXXXXX,111111111111111\n'
test_input = (large_row + normal_row * 6)[:-1]
result = self.read_csv(StringIO(test_input), header=None, usecols=[0])
rows = test_input.split('\n')
expected = DataFrame([row.split(',')[0] for row in rows])
tm.assert_frame_equal(result, expected)
def test_data_after_quote(self):
# see gh-15910
data = 'a\n1\n"b"a'
result = self.read_csv(StringIO(data))
expected = DataFrame({'a': ['1', 'ba']})
tm.assert_frame_equal(result, expected)
@tm.capture_stderr
def test_comment_whitespace_delimited(self):
test_input = """\
1 2
2 2 3
3 2 3 # 3 fields
4 2 3# 3 fields
5 2 # 2 fields
6 2# 2 fields
7 # 1 field, NaN
8# 1 field, NaN
9 2 3 # skipped line
# comment"""
df = self.read_csv(StringIO(test_input), comment='#', header=None,
delimiter='\\s+', skiprows=0,
error_bad_lines=False)
error = sys.stderr.getvalue()
# skipped lines 2, 3, 4, 9
for line_num in (2, 3, 4, 9):
assert 'Skipping line {}'.format(line_num) in error, error
expected = DataFrame([[1, 2],
[5, 2],
[6, 2],
[7, np.nan],
[8, np.nan]])
tm.assert_frame_equal(df, expected)
def test_file_like_no_next(self):
# gh-16530: the file-like need not have a "next" or "__next__"
# attribute despite having an "__iter__" attribute.
#
# NOTE: This is only true for the C engine, not Python engine.
class NoNextBuffer(StringIO):
def __next__(self):
raise AttributeError("No next method")
next = __next__
data = "a\n1"
expected = pd.DataFrame({"a": [1]})
result = self.read_csv(NoNextBuffer(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tar_suffix", [".tar", ".tar.gz"])
def test_read_tarfile(self, tar_suffix):
# see gh-16530
#
# Unfortunately, Python's CSV library can't handle
# tarfile objects (expects string, not bytes when
# iterating through a file-like).
tar_path = os.path.join(self.dirpath, "tar_csv" + tar_suffix)
with tarfile.open(tar_path, "r") as tar:
data_file = tar.extractfile("tar_data.csv")
out = self.read_csv(data_file)
expected = pd.DataFrame({"a": [1]})
tm.assert_frame_equal(out, expected)
@pytest.mark.high_memory
def test_bytes_exceed_2gb(self):
"""Read from a "CSV" that has a column larger than 2GB.
GH 16798
"""
if self.low_memory:
pytest.skip("not a high_memory test")
csv = StringIO('strings\n' + '\n'.join(
['x' * (1 << 20) for _ in range(2100)]))
df = self.read_csv(csv, low_memory=False)
assert not df.empty
|
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from os.path import join, abspath, dirname, exists
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from shutil import move
from workspace_tools.paths import *
from workspace_tools.utils import mkdir, cmd
from workspace_tools.export import export, setup_user_prj
USR_PRJ_NAME = "usr_prj"
USER_PRJ = join(EXPORT_WORKSPACE, USR_PRJ_NAME)
USER_SRC = join(USER_PRJ, "src")
def setup_test_user_prj():
if exists(USER_PRJ):
print 'Test user project already generated...'
return
setup_user_prj(USER_PRJ, join(TEST_DIR, "rtos", "mbed", "basic"), [join(LIB_DIR, "rtos"), join(LIB_DIR, "tests", "mbed", "env")])
# FAKE BUILD URL
open(join(USER_SRC, "mbed.bld"), 'w').write("http://mbed.org/users/mbed_official/code/mbed/builds/976df7c37ad5\n")
def fake_build_url_resolver(url):
# FAKE BUILD URL: Ignore the URL, always return the path to the mbed library
return {'path':MBED_LIBRARIES, 'name':'mbed'}
def test_export(toolchain, target, expected_error=None):
if toolchain is None and target is None:
base_dir = join(EXPORT_TMP, "zip")
else:
base_dir = join(EXPORT_TMP, toolchain, target)
temp_dir = join(base_dir, "temp")
mkdir(temp_dir)
zip_path, report = export(USER_PRJ, USR_PRJ_NAME, toolchain, target, base_dir, temp_dir, False, None, fake_build_url_resolver)
if report['success']:
move(zip_path, join(EXPORT_DIR, "export_%s_%s.zip" % (toolchain, target)))
print "[OK]"
else:
if expected_error is None:
print '[ERROR] %s' % report['errormsg']
else:
if (zip_path is None) and (expected_error in report['errormsg']):
print '[OK]'
else:
print '[ERROR]'
print ' zip:', zip_path
print ' msg:', report['errormsg']
if __name__ == '__main__':
setup_test_user_prj()
for toolchain, target in [
('zip', 'LPC1768'),
('emblocks', 'LPC1768'),
('emblocks', 'LPC1549'),
('emblocks', 'LPC1114'),
('emblocks', 'LPC11U35_401'),
('emblocks', 'LPC11U35_501'),
('emblocks', 'LPCCAPPUCCINO'),
('emblocks', 'LPC2368'),
('emblocks', 'STM32F407'),
('emblocks', 'DISCO_F100RB'),
('emblocks', 'DISCO_F051R8'),
('emblocks', 'DISCO_F407VG'),
('emblocks', 'DISCO_F303VC'),
('emblocks', 'NRF51822'),
('emblocks', 'NUCLEO_F401RE'),
('emblocks', 'NUCLEO_F410RB'),
('emblocks', 'NUCLEO_F411RE'),
('emblocks', 'MTS_MDOT_F405RG'),
('emblocks', 'MTS_MDOT_F411RE'),
('coide', 'KL05Z'),
('coide', 'KL25Z'),
('coide', 'LPC1768'),
('coide', 'ARCH_PRO'),
('coide', 'DISCO_F407VG'),
('coide', 'NUCLEO_F401RE'),
('coide', 'NUCLEO_F410RB'),
('coide', 'NUCLEO_F411RE'),
('coide', 'DISCO_F429ZI'),
#('coide', 'DISCO_F469NI'), removed because template not available
('coide', 'NUCLEO_F334R8'),
('coide', 'MTS_MDOT_F405RG'),
('coide', 'MTS_MDOT_F411RE'),
('uvision', 'LPC1768'),
('uvision', 'LPC11U24'),
('uvision', 'KL25Z'),
('uvision', 'LPC1347'),
('uvision', 'LPC1114'),
('uvision', 'LPC4088'),
('uvision', 'LPC4088_DM'),
('uvision', 'LPC4337'),
('uvision', 'HRM1017'),
('uvision', 'B96B_F446VE'),
('uvision', 'NUCLEO_F030R8'),
('uvision', 'NUCLEO_F031K6'),
('uvision', 'NUCLEO_F042K6'),
('uvision', 'NUCLEO_F070RB'),
('uvision', 'NUCLEO_F072RB'),
('uvision', 'NUCLEO_F091RC'),
('uvision', 'NUCLEO_F103RB'),
('uvision', 'NUCLEO_F302R8'),
('uvision', 'NUCLEO_F303K8'),
('uvision', 'NUCLEO_F303RE'),
('uvision', 'NUCLEO_F334R8'),
('uvision', 'NUCLEO_F401RE'),
('uvision', 'NUCLEO_F410RB'),
('uvision', 'NUCLEO_F411RE'),
('uvision', 'NUCLEO_F446RE'),
('uvision', 'NUCLEO_L053R8'),
('uvision', 'NUCLEO_L073RZ'),
('uvision', 'NUCLEO_L152RE'),
('uvision', 'NUCLEO_L476RG'),
('uvision', 'MTS_MDOT_F405RG'),
('uvision', 'MAXWSNENV'),
('uvision', 'MAX32600MBED'),
('uvision', 'DISCO_L053C8'),
('uvision', 'DISCO_F334C8'),
('uvision', 'DISCO_F429ZI'),
('uvision', 'DISCO_F746NG'),
('uvision', 'DISCO_F469NI'),
('uvision', 'DISCO_L476VG'),
('uvision', 'MOTE_L152RC'),
('lpcxpresso', 'LPC1768'),
('lpcxpresso', 'LPC4088'),
('lpcxpresso', 'LPC4088_DM'),
('lpcxpresso', 'LPC1114'),
('lpcxpresso', 'LPC11U35_401'),
('lpcxpresso', 'LPC11U35_501'),
('lpcxpresso', 'LPCCAPPUCCINO'),
('lpcxpresso', 'LPC1549'),
('lpcxpresso', 'LPC11U68'),
# Linux path: /home/emimon01/bin/gcc-cs/bin/
# Windows path: "C:/Program Files (x86)/CodeSourcery/Sourcery_CodeBench_Lite_for_ARM_EABI/bin/"
('codesourcery', 'LPC1768'),
# Linux path: /home/emimon01/bin/gcc-arm/bin/
# Windows path: C:/arm-none-eabi-gcc-4_7/bin/
('gcc_arm', 'LPC1768'),
('gcc_arm', 'LPC4088_DM'),
('gcc_arm', 'LPC1549'),
('gcc_arm', 'LPC1114'),
('gcc_arm', 'LPC11U35_401'),
('gcc_arm', 'LPC11U35_501'),
('gcc_arm', 'LPCCAPPUCCINO'),
('gcc_arm', 'LPC2368'),
('gcc_arm', 'LPC2460'),
('gcc_arm', 'LPC824'),
('gcc_arm', 'SSCI824'),
('gcc_arm', 'B96B_F446VE'),
('gcc_arm', 'STM32F407'),
('gcc_arm', 'DISCO_F100RB'),
('gcc_arm', 'DISCO_F051R8'),
('gcc_arm', 'DISCO_F407VG'),
('gcc_arm', 'DISCO_F303VC'),
('gcc_arm', 'DISCO_L053C8'),
('gcc_arm', 'DISCO_F334C8'),
('gcc_arm', 'DISCO_L053C8'),
('gcc_arm', 'DISCO_F429ZI'),
('gcc_arm', 'DISCO_F746NG'),
('gcc_arm', 'NUCLEO_F031K6'),
('gcc_arm', 'NUCLEO_F042K6'),
('gcc_arm', 'NRF51822'),
('gcc_arm', 'RBLAB_BLENANO'),
('gcc_arm', 'HRM1017'),
('gcc_arm', 'NUCLEO_F401RE'),
('gcc_arm', 'NUCLEO_F410RB'),
('gcc_arm', 'NUCLEO_F411RE'),
('gcc_arm', 'NUCLEO_F446RE'),
('gcc_arm', 'ELMO_F411RE'),
('gcc_arm', 'DISCO_F469NI'),
('gcc_arm', 'NUCLEO_F334R8'),
('gcc_arm', 'MAX32600MBED'),
('gcc_arm', 'MTS_MDOT_F405RG'),
('gcc_arm', 'MTS_MDOT_F411RE'),
('gcc_arm', 'RZ_A1H'),
('gcc_arm', 'MAXWSNENV'),
('gcc_arm', 'MAX32600MBED'),
('gcc_arm', 'ARCH_BLE'),
('gcc_arm', 'ARCH_MAX'),
('gcc_arm', 'ARCH_PRO'),
('gcc_arm', 'DELTA_DFCM_NNN40'),
('gcc_arm', 'K20D50M'),
('gcc_arm', 'K22F'),
('gcc_arm', 'K64F'),
('gcc_arm', 'KL05Z'),
('gcc_arm', 'KL25Z'),
('gcc_arm', 'KL43Z'),
('gcc_arm', 'KL46Z'),
('gcc_arm', 'EFM32GG_STK3700'),
('gcc_arm', 'EFM32LG_STK3600'),
('gcc_arm', 'EFM32WG_STK3800'),
('gcc_arm', 'EFM32ZG_STK3200'),
('gcc_arm', 'EFM32HG_STK3400'),
('ds5_5', 'LPC1768'),
('ds5_5', 'LPC11U24'),
('ds5_5', 'RZ_A1H'),
('iar', 'LPC1768'),
('iar', 'LPC4088_DM'),
('iar', 'LPC1347'),
('iar', 'B96B_F446VE'),
('iar', 'NUCLEO_F030R8'),
('iar', 'NUCLEO_F031K6'),
('iar', 'NUCLEO_F042K6'),
('iar', 'NUCLEO_F070RB'),
('iar', 'NUCLEO_F072RB'),
('iar', 'NUCLEO_F091RC'),
('iar', 'NUCLEO_F302R8'),
('iar', 'NUCLEO_F303K8'),
('iar', 'NUCLEO_F303RE'),
('iar', 'NUCLEO_F334R8'),
('iar', 'NUCLEO_F401RE'),
('iar', 'NUCLEO_F410RB'),
('iar', 'NUCLEO_F411RE'),
('iar', 'NUCLEO_F446RE'),
('iar', 'NUCLEO_L053R8'),
('iar', 'NUCLEO_L073RZ'),
('iar', 'NUCLEO_L152RE'),
('iar', 'NUCLEO_L476RG'),
('iar', 'DISCO_L053C8'),
('iar', 'DISCO_F334C8'),
('iar', 'DISCO_F429ZI'),
('iar', 'DISCO_F469NI'),
('iar', 'DISCO_F746NG'),
('iar', 'DISCO_L476VG'),
('iar', 'STM32F407'),
('iar', 'MTS_MDOT_F405RG'),
('iar', 'MTS_MDOT_F411RE'),
('iar', 'MAXWSNENV'),
('iar', 'MAX32600MBED'),
('iar', 'MOTE_L152RC'),
# Removed following item to avoid script error
#(None, None),
]:
print '\n=== Exporting to "%s::%s" ===' % (toolchain, target)
test_export(toolchain, target)
print "\n=== Test error messages ==="
test_export('lpcxpresso', 'LPC11U24', expected_error='lpcxpresso')
|
|
# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base Driver for DataCore SANsymphony storage array."""
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import context as cinder_context
from cinder import exception as cinder_exception
from cinder.i18n import _
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.datacore import api
from cinder.volume.drivers.datacore import exception as datacore_exception
from cinder.volume.drivers.datacore import utils as datacore_utils
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
datacore_opts = [
cfg.StrOpt('datacore_disk_type',
default='single',
choices=['single', 'mirrored'],
help='DataCore virtual disk type (single/mirrored). '
'Mirrored virtual disks require two storage servers in '
'the server group.'),
cfg.StrOpt('datacore_storage_profile',
default=None,
help='DataCore virtual disk storage profile.'),
cfg.ListOpt('datacore_disk_pools',
default=[],
help='List of DataCore disk pools that can be used '
'by volume driver.'),
cfg.IntOpt('datacore_api_timeout',
default=300,
min=1,
help='Seconds to wait for a response from a '
'DataCore API call.'),
cfg.IntOpt('datacore_disk_failed_delay',
default=15,
min=0,
help='Seconds to wait for DataCore virtual '
'disk to come out of the "Failed" state.'),
]
CONF = cfg.CONF
CONF.register_opts(datacore_opts)
class DataCoreVolumeDriver(driver.BaseVD):
"""DataCore SANsymphony base volume driver."""
STORAGE_PROTOCOL = 'N/A'
AWAIT_DISK_ONLINE_INTERVAL = 10
AWAIT_SNAPSHOT_ONLINE_INTERVAL = 10
AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY = 5
DATACORE_SINGLE_DISK = 'single'
DATACORE_MIRRORED_DISK = 'mirrored'
DATACORE_DISK_TYPE_KEY = 'datacore:disk_type'
DATACORE_STORAGE_PROFILE_KEY = 'datacore:storage_profile'
DATACORE_DISK_POOLS_KEY = 'datacore:disk_pools'
VALID_VOLUME_TYPE_KEYS = (DATACORE_DISK_TYPE_KEY,
DATACORE_STORAGE_PROFILE_KEY,
DATACORE_DISK_POOLS_KEY,)
def __init__(self, *args, **kwargs):
super(DataCoreVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(san.san_opts)
self.configuration.append_config_values(datacore_opts)
self._api = None
self._default_volume_options = None
def do_setup(self, context):
"""Perform validations and establish connection to server.
:param context: Context information
"""
required_params = [
'san_ip',
'san_login',
'san_password',
]
for param in required_params:
if not getattr(self.configuration, param, None):
raise cinder_exception.InvalidInput(_("%s not set.") % param)
self._api = api.DataCoreClient(
self.configuration.san_ip,
self.configuration.san_login,
self.configuration.san_password,
self.configuration.datacore_api_timeout)
disk_type = self.configuration.datacore_disk_type
if disk_type:
disk_type = disk_type.lower()
storage_profile = self.configuration.datacore_storage_profile
if storage_profile:
storage_profile = storage_profile.lower()
disk_pools = self.configuration.datacore_disk_pools
if disk_pools:
disk_pools = [pool.lower() for pool in disk_pools]
self._default_volume_options = {
self.DATACORE_DISK_TYPE_KEY: disk_type,
self.DATACORE_STORAGE_PROFILE_KEY: storage_profile,
self.DATACORE_DISK_POOLS_KEY: disk_pools,
}
def check_for_setup_error(self):
pass
def get_volume_backend_name(self):
"""Get volume backend name of the volume service.
:return: Volume backend name
"""
backend_name = self.configuration.safe_get('volume_backend_name')
return (backend_name or
'datacore_' + self.get_storage_protocol().lower())
def get_storage_protocol(self):
"""Get storage protocol of the volume backend.
:return: Storage protocol
"""
return self.STORAGE_PROTOCOL
def get_volume_stats(self, refresh=False):
"""Obtain status of the volume service.
:param refresh: Whether to get refreshed information
"""
if refresh:
self._update_volume_stats()
return self._stats
def create_volume(self, volume):
"""Creates a volume.
:param volume: Volume object
:return: Dictionary of changes to the volume object to be persisted
"""
volume_options = self._get_volume_options(volume)
disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
if disk_type == self.DATACORE_MIRRORED_DISK:
logical_disk_count = 2
virtual_disk_type = 'MultiPathMirrored'
elif disk_type == self.DATACORE_SINGLE_DISK:
logical_disk_count = 1
virtual_disk_type = 'NonMirrored'
else:
msg = _("Virtual disk type '%s' is not valid.") % disk_type
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
profile_id = self._get_storage_profile_id(
volume_options[self.DATACORE_STORAGE_PROFILE_KEY])
pools = datacore_utils.get_distinct_by(
lambda pool: pool.ServerId,
self._get_available_disk_pools(
volume_options[self.DATACORE_DISK_POOLS_KEY]))
if len(pools) < logical_disk_count:
msg = _("Suitable disk pools were not found for "
"creating virtual disk.")
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
disk_size = self._get_size_in_bytes(volume['size'])
logical_disks = []
virtual_disk = None
try:
for logical_disk_pool in pools[:logical_disk_count]:
logical_disks.append(
self._api.create_pool_logical_disk(
logical_disk_pool.Id, 'Striped', disk_size))
virtual_disk_data = self._api.build_virtual_disk_data(
volume['id'],
virtual_disk_type,
disk_size,
volume['display_name'],
profile_id)
virtual_disk = self._api.create_virtual_disk_ex2(
virtual_disk_data,
logical_disks[0].Id,
logical_disks[1].Id if logical_disk_count == 2 else None,
True)
virtual_disk = self._await_virtual_disk_online(virtual_disk.Id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Creation of volume %(volume)s failed.",
{'volume': volume['id']})
try:
if virtual_disk:
self._api.delete_virtual_disk(virtual_disk.Id, True)
else:
for logical_disk in logical_disks:
self._api.delete_logical_disk(logical_disk.Id)
except datacore_exception.DataCoreException as e:
LOG.warning("An error occurred on a cleanup after failed "
"creation of volume %(volume)s: %(error)s.",
{'volume': volume['id'], 'error': e})
return {'provider_location': virtual_disk.Id}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
:param volume: Volume object
:param snapshot: Snapshot object
:return: Dictionary of changes to the volume object to be persisted
"""
return self._create_volume_from(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates volume clone.
:param volume: New Volume object
:param src_vref: Volume object that must be cloned
:return: Dictionary of changes to the volume object to be persisted
"""
return self._create_volume_from(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size.
:param volume: Volume object
:param new_size: new size in GB to extend this volume to
"""
virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True)
self._set_virtual_disk_size(virtual_disk,
self._get_size_in_bytes(new_size))
def delete_volume(self, volume):
"""Deletes a volume.
:param volume: Volume object
"""
virtual_disk = self._get_virtual_disk_for(volume)
if virtual_disk:
if virtual_disk.IsServed:
logical_disks = self._api.get_logical_disks()
logical_units = self._api.get_logical_units()
target_devices = self._api.get_target_devices()
logical_disks = [disk.Id for disk in logical_disks
if disk.VirtualDiskId == virtual_disk.Id]
logical_unit_devices = [unit.VirtualTargetDeviceId
for unit in logical_units
if unit.LogicalDiskId in logical_disks]
initiator_ports = set(device.InitiatorPortId
for device in target_devices
if device.Id in logical_unit_devices)
for port in initiator_ports:
self._api.unserve_virtual_disks_from_port(
port, [virtual_disk.Id])
self._api.delete_virtual_disk(virtual_disk.Id, True)
def create_snapshot(self, snapshot):
"""Creates a snapshot.
:param snapshot: Snapshot object
:return: Dictionary of changes to the snapshot object to be persisted
"""
src_virtual_disk = self._get_virtual_disk_for(snapshot['volume'],
raise_not_found=True)
volume_options = self._get_volume_options(snapshot['volume'])
profile_name = volume_options[self.DATACORE_STORAGE_PROFILE_KEY]
profile_id = self._get_storage_profile_id(profile_name)
pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY]
if src_virtual_disk.DiskStatus != 'Online':
LOG.warning("Attempting to make a snapshot from virtual disk "
"%(disk)s that is in %(state)s state.",
{'disk': src_virtual_disk.Id,
'state': src_virtual_disk.DiskStatus})
snapshot_virtual_disk = self._create_virtual_disk_copy(
src_virtual_disk,
snapshot['id'],
snapshot['display_name'],
profile_id=profile_id,
pool_names=pool_names)
return {'provider_location': snapshot_virtual_disk.Id}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: Snapshot object
"""
snapshot_virtual_disk = self._get_virtual_disk_for(snapshot)
if snapshot_virtual_disk:
self._api.delete_virtual_disk(snapshot_virtual_disk.Id, True)
def ensure_export(self, context, volume):
pass
def create_export(self, context, volume, connector):
pass
def remove_export(self, context, volume):
pass
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector.
:param volume: Volume object
:param connector: Connector information
"""
virtual_disk = self._get_virtual_disk_for(volume)
if virtual_disk:
if connector:
clients = [self._get_client(connector['host'],
create_new=False)]
else:
clients = self._api.get_clients()
server_group = self._get_our_server_group()
@cinder_utils.synchronized(
'datacore-backend-%s' % server_group.Id, external=True)
def unserve_virtual_disk(client_id):
self._api.unserve_virtual_disks_from_host(
client_id, [virtual_disk.Id])
for client in clients:
unserve_virtual_disk(client.Id)
def _update_volume_stats(self):
performance_data = self._api.get_performance_by_type(
['DiskPoolPerformance'])
total = 0
available = 0
reserved = 0
for performance in performance_data:
missing_perf_data = []
if hasattr(performance.PerformanceData, 'BytesTotal'):
total += performance.PerformanceData.BytesTotal
else:
missing_perf_data.append('BytesTotal')
if hasattr(performance.PerformanceData, 'BytesAvailable'):
available += performance.PerformanceData.BytesAvailable
else:
missing_perf_data.append('BytesAvailable')
if hasattr(performance.PerformanceData, 'BytesReserved'):
reserved += performance.PerformanceData.BytesReserved
else:
missing_perf_data.append('BytesReserved')
if missing_perf_data:
LOG.warning("Performance data %(data)s is missing for "
"disk pool %(pool)s",
{'data': missing_perf_data,
'pool': performance.ObjectId})
provisioned = 0
logical_disks = self._api.get_logical_disks()
for disk in logical_disks:
if getattr(disk, 'PoolId', None):
provisioned += disk.Size.Value
total_capacity_gb = self._get_size_in_gigabytes(total)
free = available + reserved
free_capacity_gb = self._get_size_in_gigabytes(free)
provisioned_capacity_gb = self._get_size_in_gigabytes(provisioned)
reserved_percentage = 100.0 * reserved / total if total else 0.0
ratio = self.configuration.max_over_subscription_ratio
stats_data = {
'vendor_name': 'DataCore',
'QoS_support': False,
'volume_backend_name': self.get_volume_backend_name(),
'driver_version': self.get_version(),
'storage_protocol': self.get_storage_protocol(),
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
'reserved_percentage': reserved_percentage,
'max_over_subscription_ratio': ratio,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
}
self._stats = stats_data
def _get_our_server_group(self):
server_group = datacore_utils.get_first(lambda group: group.OurGroup,
self._api.get_server_groups())
return server_group
def _get_volume_options_from_type(self, type_id, default_options):
options = dict(default_options.items())
if type_id:
admin_context = cinder_context.get_admin_context()
volume_type = volume_types.get_volume_type(admin_context, type_id)
specs = dict(volume_type).get('extra_specs')
for key, value in six.iteritems(specs):
if key in self.VALID_VOLUME_TYPE_KEYS:
if key == self.DATACORE_DISK_POOLS_KEY:
options[key] = [v.strip().lower()
for v in value.split(',')]
else:
options[key] = value.lower()
return options
def _get_volume_options(self, volume):
type_id = volume['volume_type_id']
volume_options = self._get_volume_options_from_type(
type_id, self._default_volume_options)
return volume_options
def _get_online_servers(self):
servers = self._api.get_servers()
online_servers = [server for server in servers
if server.State == 'Online']
return online_servers
def _get_available_disk_pools(self, disk_pool_names=None):
online_servers = [server.Id for server in self._get_online_servers()]
pool_performance = {
performance.ObjectId: performance.PerformanceData for performance
in self._api.get_performance_by_type(['DiskPoolPerformance'])}
disk_pools = self._api.get_disk_pools()
lower_disk_pool_names = ([name.lower() for name in disk_pool_names]
if disk_pool_names else [])
available_disk_pools = [
pool for pool in disk_pools
if (self._is_pool_healthy(pool, pool_performance, online_servers)
and (not lower_disk_pool_names
or pool.Caption.lower() in lower_disk_pool_names))]
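        # Prefer the pools with the most available space first.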
available_disk_pools.sort(
key=lambda p: pool_performance[p.Id].BytesAvailable, reverse=True)
return available_disk_pools
def _get_virtual_disk_for(self, obj, raise_not_found=False):
disk_id = obj.get('provider_location')
virtual_disk = datacore_utils.get_first_or_default(
lambda disk: disk.Id == disk_id,
self._api.get_virtual_disks(),
None)
if not virtual_disk:
msg = (_("Virtual disk not found for %(object)s %(object_id)s.")
% {'object': obj.__class__.__name__.lower(),
'object_id': obj['id']})
if raise_not_found:
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
else:
LOG.warning(msg)
return virtual_disk
def _set_virtual_disk_size(self, virtual_disk, new_size):
return self._api.set_virtual_disk_size(virtual_disk.Id, new_size)
def _get_storage_profile(self, profile_name, raise_not_found=False):
profiles = self._api.get_storage_profiles()
profile = datacore_utils.get_first_or_default(
lambda p: p.Caption.lower() == profile_name.lower(),
profiles,
None)
if not profile and raise_not_found:
msg = (_("Specified storage profile %s not found.")
% profile_name)
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
return profile
def _get_storage_profile_id(self, profile_name):
profile_id = None
if profile_name:
profile = self._get_storage_profile(profile_name,
raise_not_found=True)
profile_id = profile.Id
return profile_id
def _await_virtual_disk_online(self, virtual_disk_id):
def inner(start_time):
disk_failed_delay = self.configuration.datacore_disk_failed_delay
virtual_disk = datacore_utils.get_first(
lambda disk: disk.Id == virtual_disk_id,
self._api.get_virtual_disks())
if virtual_disk.DiskStatus == 'Online':
raise loopingcall.LoopingCallDone(virtual_disk)
elif (virtual_disk.DiskStatus != 'FailedRedundancy'
and time.time() - start_time >= disk_failed_delay):
msg = (_("Virtual disk %(disk)s did not come out of the "
"%(state)s state after %(timeout)s seconds.")
% {'disk': virtual_disk.Id,
'state': virtual_disk.DiskStatus,
'timeout': disk_failed_delay})
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
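        # Poll the virtual disk at AWAIT_DISK_ONLINE_INTERVAL until it reports
        # 'Online', failing after datacore_disk_failed_delay in a bad state.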
inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time())
return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait()
def _create_volume_from(self, volume, src_obj):
src_virtual_disk = self._get_virtual_disk_for(src_obj,
raise_not_found=True)
if src_virtual_disk.DiskStatus != 'Online':
LOG.warning("Attempting to create a volume from virtual disk "
"%(disk)s that is in %(state)s state.",
{'disk': src_virtual_disk.Id,
'state': src_virtual_disk.DiskStatus})
volume_options = self._get_volume_options(volume)
profile_id = self._get_storage_profile_id(
volume_options[self.DATACORE_STORAGE_PROFILE_KEY])
pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY]
volume_virtual_disk = self._create_virtual_disk_copy(
src_virtual_disk,
volume['id'],
volume['display_name'],
profile_id=profile_id,
pool_names=pool_names)
volume_logical_disk = datacore_utils.get_first(
lambda disk: disk.VirtualDiskId == volume_virtual_disk.Id,
self._api.get_logical_disks())
try:
volume_virtual_disk = self._set_virtual_disk_size(
volume_virtual_disk,
self._get_size_in_bytes(volume['size']))
disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
if disk_type == self.DATACORE_MIRRORED_DISK:
pools = self._get_available_disk_pools(pool_names)
selected_pool = datacore_utils.get_first_or_default(
lambda pool: (
pool.ServerId != volume_logical_disk.ServerHostId
and pool.Id != volume_logical_disk.PoolId),
pools,
None)
if selected_pool:
logical_disk = self._api.create_pool_logical_disk(
selected_pool.Id,
'Striped',
volume_virtual_disk.Size.Value)
self._api.bind_logical_disk(volume_virtual_disk.Id,
logical_disk.Id,
'Second',
True,
False,
True)
else:
msg = _("Can not create mirrored virtual disk. "
"Suitable disk pools not found.")
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
volume_virtual_disk = self._await_virtual_disk_online(
volume_virtual_disk.Id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Creation of volume %(volume)s failed.",
{'volume': volume['id']})
try:
self._api.delete_virtual_disk(volume_virtual_disk.Id, True)
except datacore_exception.DataCoreException as e:
LOG.warning("An error occurred on a cleanup after failed "
"creation of volume %(volume)s: %(error)s.",
{'volume': volume['id'], 'error': e})
return {'provider_location': volume_virtual_disk.Id}
def _create_full_snapshot(self, description, name, pool_names, profile_id,
src_virtual_disk):
pools = self._get_available_disk_pools(pool_names)
destination_pool = datacore_utils.get_first_or_default(
lambda pool: (pool.ServerId == src_virtual_disk.FirstHostId
or pool.ServerId == src_virtual_disk.SecondHostId),
pools,
None)
if not destination_pool:
msg = _("Suitable snapshot destination disk pool not found for "
"virtual disk %s.") % src_virtual_disk.Id
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
server = datacore_utils.get_first(
lambda srv: srv.Id == destination_pool.ServerId,
self._api.get_servers())
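        # Make sure the owning server has a snapshot map store; if it does
        # not, designate one in the destination pool.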
if not server.SnapshotMapStorePoolId:
self._api.designate_map_store(destination_pool.Id)
snapshot = self._api.create_snapshot(src_virtual_disk.Id,
name,
description,
destination_pool.Id,
'Full',
False,
profile_id)
return snapshot
def _await_snapshot_migrated(self, snapshot_id):
def inner():
snapshot_data = datacore_utils.get_first(
lambda snapshot: snapshot.Id == snapshot_id,
self._api.get_snapshots())
if snapshot_data.State == 'Migrated':
raise loopingcall.LoopingCallDone(snapshot_data)
elif (snapshot_data.State != 'Healthy'
and snapshot_data.Failure != 'NoFailure'):
msg = (_("Full migration of snapshot %(snapshot)s failed. "
"Snapshot is in %(state)s state.")
% {'snapshot': snapshot_data.Id,
'state': snapshot_data.State})
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
loop = loopingcall.FixedIntervalLoopingCall(inner)
return loop.start(self.AWAIT_SNAPSHOT_ONLINE_INTERVAL,
self.AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY).wait()
def _create_virtual_disk_copy(self, src_virtual_disk, name, description,
profile_id=None, pool_names=None):
snapshot = self._create_full_snapshot(
description, name, pool_names, profile_id, src_virtual_disk)
try:
snapshot = self._await_snapshot_migrated(snapshot.Id)
self._api.delete_snapshot(snapshot.Id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Split operation failed for snapshot "
"%(snapshot)s.", {'snapshot': snapshot.Id})
try:
logical_disk_copy = datacore_utils.get_first(
lambda disk: (
disk.Id == snapshot.DestinationLogicalDiskId),
self._api.get_logical_disks())
virtual_disk_copy = datacore_utils.get_first(
lambda disk: (
disk.Id == logical_disk_copy.VirtualDiskId),
self._api.get_virtual_disks())
self._api.delete_virtual_disk(virtual_disk_copy.Id, True)
except datacore_exception.DataCoreException as e:
LOG.warning("An error occurred on a cleanup after failed "
"split of snapshot %(snapshot)s: %(error)s.",
{'snapshot': snapshot.Id, 'error': e})
logical_disk_copy = datacore_utils.get_first(
lambda disk: disk.Id == snapshot.DestinationLogicalDiskId,
self._api.get_logical_disks())
virtual_disk_copy = datacore_utils.get_first(
lambda disk: disk.Id == logical_disk_copy.VirtualDiskId,
self._api.get_virtual_disks())
return virtual_disk_copy
def _get_client(self, name, create_new=False):
client_hosts = self._api.get_clients()
client = datacore_utils.get_first_or_default(
lambda host: host.HostName == name, client_hosts, None)
if create_new:
if not client:
client = self._api.register_client(
name, None, 'Other', 'PreferredServer', None)
self._api.set_client_capabilities(client.Id, True, True)
return client
@staticmethod
def _is_pool_healthy(pool, pool_performance, online_servers):
if (pool.PoolStatus == 'Running'
and hasattr(pool_performance[pool.Id], 'BytesAvailable')
and pool.ServerId in online_servers):
return True
return False
@staticmethod
def _get_size_in_bytes(size_in_gigabytes):
return size_in_gigabytes * units.Gi
@staticmethod
def _get_size_in_gigabytes(size_in_bytes):
return size_in_bytes / float(units.Gi)
|
|
"""Interface to access face detectors from the dlib library.
"""
# standard imports
import os
import sys
import logging
# third party imports
import numpy as np
import dlib
# toolbox imports
from ..base.meta import Metadata
from ..base.image import BoundingBox
from ..tool.face.detector import Detector as FaceDetector
from ..tool.face.landmarks import (Detector as LandmarkDetector,
FacialLandmarks68)
# logging
LOG = logging.getLogger(__name__)
class DetectorHOG(FaceDetector):
# pylint: disable=too-many-ancestors
"""The dlib HOG face detector.
Attributes
----------
_detector: dlib.fhog_object_detector
"""
def __init__(self, **kwargs) -> None:
"""Initialize this :py:class:`DetectorHOG`.
"""
super().__init__(**kwargs)
self._detector = None
def _prepare(self, **kwargs) -> None:
"""Prepare this :py:class:`DetectorHOG` by loading the model data.
"""
super()._prepare(**kwargs)
self._detector = dlib.get_frontal_face_detector()
def _unprepare(self):
"""Release the resources acquired by :py:class:`DetectorHOG`.
"""
self._detector = None
super()._unprepare()
def _prepared(self) -> bool:
"""The DetectorHOG is prepared, once the model data
have been loaded.
"""
return (self._detector is not None) and super()._prepared()
def _detect(self, image: np.ndarray, **kwargs) -> Metadata:
"""Apply the dlib histogram of gradients detector (HOG) to
detect faces in the given image.
Arguments
---------
image:
An image in an appropriate format for detection with the
            DLib HOG detector. This means a `uint8` grayscale or
RGB image.
"""
if self._detector is None:
return None
LOG.debug("Calling dlib detector with %s of shape %s of type %s",
type(image), image.shape, image.dtype)
        # FIXME[hack]: make sure the image is in the correct format
        # (this should be done by preprocessing) ...
        if image.dtype != np.uint8:
            image = image.astype(np.uint8)
# dlib: image must be 8bit gray or RGB image.
rects = self._detector(image, 2)
detections = Metadata(
description='Detections by the DLib HOG detector')
for rect in rects:
detections.add_region(BoundingBox(x=rect.left(), y=rect.top(),
width=rect.width(),
height=rect.height()))
return detections
class DetectorCNN(FaceDetector):
# pylint: disable=too-many-ancestors
"""The dlib CNN detector.
_model_file: str
_detector: dlib.cnn_face_detection_model_v1
"""
def __init__(self, *args, model_file='mmod_human_face_detector.dat',
**kwargs) -> None:
"""The OpenCV Single Shot MultiBox Detector (SSD).
Arguments
---------
dnn: str
The model to use. There are currently two models available:
'CAFFE' is the original 16-bit floating point model trained
with Caffe, and 'TF' is a 8-bit quantized version for TensorFlow.
"""
super().__init__(*args, **kwargs)
self._detector = None
self._model_file = None
self.set_model_file(model_file)
def set_model_file(self, model_file) -> None:
"""Set the model file for this :py:class:`DetectorCNN`.
"""
if not os.path.isabs(model_file):
dlib_model_directory = os.environ.get('DLIB_MODELS', '.')
model_file = os.path.join(dlib_model_directory, model_file)
if model_file != self._model_file:
self._model_file = model_file
self._add_requirement('model', 'file', model_file)
def _prepare(self, **kwargs) -> None:
"""Prepare this :py:class:`DetectorCNN`.
"""
super()._prepare(**kwargs)
self._detector = dlib.cnn_face_detection_model_v1(self._model_file)
def _unprepare(self) -> None:
"""Release resources acquired by this :py:class:`DetectorCNN`.
"""
self._detector = None
super()._unprepare()
def _prepared(self) -> bool:
"""Release resources acquired by this :py:class:`DetectorCNN`.
"""
return (self._detector is not None) and super()._prepared()
def _detect(self, image: np.ndarray, **kwargs) -> Metadata:
"""The dlib CNN face detector.
"""
# The dlib detector expects images to be 8-bit RGB or grayscale
        if image.dtype != np.uint8:  # FIXME[hack]:
# FIXME[todo]: better solution: make sure that images are
# provided in correct format or provide preprocessing ...
print(f"dlib: error: image is {image.dtype} "
f"(min={image.min()}, max={image.max()}) "
"but should be np.uint8!", file=sys.stderr)
image = image.astype(np.uint8)
# It is also possible to pass a list of images to the
# detector - like this:
        #   dets = detector([image1, image2, ...], upsample_num, batch_size=128)
# In this case it will return a mmod_rectangless object. This object
# behaves just like a list of lists and can be iterated over.
detections = self._detector(image, 2)
# The result is of type dlib.mmod_rectangles, which is
        # basically a list of rectangles annotated with confidence
        # values. For an individual detection d (of type
        # dlib.mmod_rectangle), the information can be accessed by
# d.rect and d.confidence.
result = Metadata(
description='Detections by the dlib CNN face detector')
for detection in detections:
rect = detection.rect
result.add_region(BoundingBox(x=rect.left(), y=rect.top(),
width=rect.width(),
height=rect.height()),
confidence=detection.confidence)
return result
class FacialLandmarkDetector(LandmarkDetector):
# pylint: disable=too-many-ancestors
# FIXME[concept]: this class first applies a face detector to find faces
# in a large image and then applies the landmark detector. This seems
    # to be a common situation for which we should provide an API.
"""A facial landmark detector based on dlib.
Attributes
----------
_detector: dlib.fhog_object_detector
_predictor: dlib.shape_predictor
"""
def __init__(self, model_file: str = None, **kwargs):
# shape_predictor_5_face_landmarks.dat
# shape_predictor_68_face_landmarks.dat
super().__init__(**kwargs)
self._predictor = None
self._detector = None
        #
        # Load the dlib facial landmark predictor model. If the given path
        # does not exist, look it up in the DLIB_MODELS directory.
        #
        if os.path.exists(model_file):
            predictor_name = model_file
        else:
            predictor_name = os.path.join(os.environ.get('DLIB_MODELS', '.'),
                                          model_file)
        if os.path.exists(predictor_name):
            self._predictor = dlib.shape_predictor(predictor_name)
        else:
            raise ValueError(f"Dlib predictor model file "
                             f"'{predictor_name}' not found.")
def _prepare(self, **kwargs) -> None:
"""Prepare this DetectorHOG by loading the model data.
"""
super()._prepare(**kwargs)
self._detector = dlib.get_frontal_face_detector()
def _unprepare(self):
"""Unprepare this DetectorHOG by releasing acquired resources.
"""
self._detector = None
super()._unprepare()
def _prepared(self) -> bool:
"""The DetectorHOG is prepared, once the model data
have been loaded.
"""
return (self._detector is not None) and super()._prepared()
@staticmethod
def _detection_to_landmarks(detection: dlib.full_object_detection):
# from imutils.face_utils.shape_to_np:
# initialize the list of (x, y)-coordinates
points = np.ndarray((detection.num_parts, 2)) # dtype=dtype
# loop over all facial landmarks and convert them
# to a 2-tuple of (x, y)-coordinates
for i in range(0, detection.num_parts):
points[i] = (detection.part(i).x, detection.part(i).y)
        # Construct a FacialLandmarks68 object holding the detected landmarks
return FacialLandmarks68(points)
#
# Detection
#
def _detect(self, image: np.ndarray, box: BoundingBox = None,
**kwargs) -> Metadata:
# pylint: disable=arguments-differ
"""Do the actual facial landmark detection.
Notice, that the facial detector expects to work on a single face,
not on a complete image with multiple faces.
That is, the image provided to this function may be a crop
from a larger image.
"""
# The dlib.shape_predictor takes an image region, provided
        # by an image and a rectangle. Since in our API the facial
        # landmark detector already expects a cropped image region
        # as argument, we simply set the rectangle to include the
        # whole image.
rect = (dlib.rectangle(0, 0, image.shape[1], image.shape[0])
if box is None else
dlib.rectangle(box.x, box.y, box.width, box.height))
detection = self._predictor(image, rect)
# detection is of type dlib.full_object_detection, which basically
# is a list of N points. These can be transformed into
# a two dimensional array of shape (N,2) for further processing.
        metadata = Metadata(
            description='Facial landmarks detected by the dlib detector')
        metadata.add_region(self._detection_to_landmarks(detection))
return metadata
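# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the raw dlib
# calls wrapped by the classes above are typically used. The blank test image
# and the 'shape_predictor_68_face_landmarks.dat' model path are assumptions
# made only for this demo; adjust them for real use.
if __name__ == '__main__':
    demo_image = np.zeros((240, 320, 3), dtype=np.uint8)  # blank RGB frame
    hog_detector = dlib.get_frontal_face_detector()
    # Second argument: how many times to upsample the image before detecting.
    for rect in hog_detector(demo_image, 1):
        print("face at", rect.left(), rect.top(), rect.right(), rect.bottom())
    # Landmark prediction needs a trained shape predictor model file.
    model_path = os.path.join(os.environ.get('DLIB_MODELS', '.'),
                              'shape_predictor_68_face_landmarks.dat')
    if os.path.exists(model_path):
        predictor = dlib.shape_predictor(model_path)
        box = dlib.rectangle(0, 0, demo_image.shape[1], demo_image.shape[0])
        landmarks = predictor(demo_image, box)
        print("predicted", landmarks.num_parts, "landmark points")
    else:
        print(f"shape predictor model not found at '{model_path}'")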
|
|
# Copyright (C) Schweizerische Bundesbahnen SBB, 2016
# Python 3.4
__author__ = 'florianseidl'
from base64 import b64encode
from urllib import request
from urllib.request import urlopen, HTTPError, URLError, ContentTooShortError, Request
from time import sleep
from threading import Condition
import logging
import ssl
import sys
from os import path
from configutil import decrypt
from cimon import find_config_file_path
logger = logging.getLogger(__name__)
def create_http_client(base_url, username = None, password = None, jwt_login_url= None, saml_login_url=None, fixed_headers=None, verify_ssl=True, client_cert=None):
ssl_config = SslConfig(verify_ssl, client_cert)
if jwt_login_url:
return HttpClient(base_url=base_url,
authentication_handler=JwtAuthenticationHandler(username=username, password=password, jwt_login_url=jwt_login_url, ssl_config=ssl_config),
ssl_config=ssl_config)
elif saml_login_url:
return HttpClient(base_url=base_url,
authentication_handler=SamlAuthenticationHandler(username=username, password=password, saml_login_url=saml_login_url, ssl_config=ssl_config),
ssl_config=ssl_config)
elif username:
return HttpClient(base_url=base_url,
authentication_handler=BasicAuthenticationHandler(username=username, password=password),
ssl_config=ssl_config)
elif fixed_headers:
return HttpClient(base_url=base_url,
authentication_handler=FixedHeaderAuthenticationHandler(headers=fixed_headers),
ssl_config=ssl_config)
else:
return HttpClient(base_url=base_url, ssl_config=ssl_config)
# Base classes to build collectors.
#
# Currently includes a HTTP Client with handlers for different kinds of authentication
#
def configure_client_cert(config, key=None):
if not config:
return None
return ClientCert(
find_config_file_path(config['certfile']),
find_config_file_path(config['keyfile']),
decrypt(config.get('passwordEncrypted', None), key))
# encrypt the certificate key using: openssl rsa -aes256 -in client.key -passout pass:<insert password here> -out client_enc.key
class ClientCert:
def __init__(self, certfile, keyfile, password):
if not path.isfile(certfile):
raise FileNotFoundError(certfile)
if not path.isfile(keyfile):
raise FileNotFoundError(keyfile)
self.certfile = certfile
self.keyfile = keyfile
self.password = password
def add_to(self,ctx):
logger.info("Adding client certificate stored in %s", self.certfile)
ctx.load_cert_chain(self.certfile, self.keyfile, self.password)
class SslConfig:
def __init__(self, verify_ssl=True, client_cert=None):
ctx = ssl.create_default_context()
        if not verify_ssl:
            # verification explicitly disabled; otherwise the default
            # (verifying) context is fine
            self.__disable_ssl_verification__(ctx)
if client_cert:
client_cert.add_to(ctx)
if sys.version_info < (3,4,3):
logger.warning("Python version 3.4.3, using alternative global config")
request.install_opener(request.build_opener(request.HTTPSHandler(context=ctx, check_hostname=verify_ssl)))
self.ctx = None
else:
self.ctx = ctx
def __disable_ssl_verification__(self, ctx):
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
logger.info("SSL validation disabled")
class EmptyAuthenticationHandler:
""" Implement no authentication so HttpClient is not forced to check for the presence of an authentication handler """
def request_headers(self):
return {}
def handle_forbidden(self, request_headers, status_code):
return False # does not authenticate
class BasicAuthenticationHandler():
""" Authenticate using RFC """
def __init__(self, username, password):
# basic authentication
self.auth = b'Basic ' + b64encode(('%s:%s' % (username, password)).encode('utf-8'))
def request_headers(self):
return { "Authorization" : self.auth }
def handle_forbidden(self, request_headers, status_code):
return True # retry
class TokenBasedAuthenticationHandler():
""" Authenticate via a Token drawn from a configured login url """
def __init__(self, username, password, login_url, ssl_config=SslConfig()):
self.login_http_client = HttpClient(login_url, BasicAuthenticationHandler(username, password), ssl_config=ssl_config)
self.token = None
self.is_renewing = False
self.renewing = Condition()
def request_headers(self):
        if self.is_renewing: # avoid the lock at the cost of sometimes missing a renewal, sending multiple requests and getting more than one 40x
            with self.renewing:
                # wait until the renewing thread has finished
                while self.is_renewing:
                    self.renewing.wait()
if not self.token:
self.login({})
return { self.request_header_name : self.token if self.token else "no token received" }
def handle_forbidden(self, request_headers, status_code):
self.login(request_headers)
# retry whether there is a new token or not....
return True
def login(self, request_headers):
        # only one of the threads will renew the token
        with self.renewing:
            # check if another thread has already set the token to a different value
if self.token == request_headers.get(self.request_header_name, None):
try:
self.is_renewing = True
self.token = self.__renew_token__()
finally:
self.is_renewing = False
self.renewing.notify_all()
def __renew_token__(self):
        # looks as if we have to acquire a (new) token....
logger.debug("Requesting new Token from %s...", self.login_http_client.base_url)
response = self.login_http_client.open()
token = response.getheader(self.response_header_name)
if token:
logger.info("Received new Token")
logger.debug("New Token: '%s...'", token[:42]) # log only start in order to avoid leak
return token
else:
logger.error("Failed to renew Token, did not receive an %s header" % self.response_header_name)
return self.token # try with old token, will try another login if token is invalid....
class JwtAuthenticationHandler(TokenBasedAuthenticationHandler):
""" Authenticate via JWT Tokens as implemented in SBB WSG """
def __init__(self, username, password, jwt_login_url, ssl_config=SslConfig()):
super().__init__(username=username, password=password, login_url=jwt_login_url, ssl_config=ssl_config)
self.request_header_name="Authorization"
self.response_header_name="Authorization"
class SamlAuthenticationHandler(TokenBasedAuthenticationHandler):
""" Authenticate via SAML Cookies as implemented in SBB WSG """
def __init__(self, username, password, saml_login_url, ssl_config=SslConfig()):
super().__init__(username=username, password=password, login_url=saml_login_url, ssl_config=ssl_config)
self.request_header_name="Cookie"
self.response_header_name="Set-Cookie"
class FixedHeaderAuthenticationHandler:
    """ Authenticate by using a fixed header like an api key """
    def __init__(self, headers):
        self.headers = headers
    def request_headers(self):
        return self.headers
def handle_forbidden(self, request_headers, status_code):
return False # no action possible
class HttpClient:
""" A HttpClient able to do authentication via
- Retry: Retry for instance 401 as this does sometimes help (Bug in SBB eBiz/LDAP)
- BasicAuthentication: Username/Password - Use for instance from within SBB LAN. Also supports retry.
- JWTAuthentication: JWT using a specific Login URL and HTTP Authorization Headers for use with SBB Webservice Gateway (WSG) - access from outside SBB LAN
- SamlAuthentication: SAML using a specific Login URL and HTTP Set-Cookie and Cookie Headers for use with SBB Webservice Gateway (WSG) - access from outside SBB LAN
Will retry status code 5xx and if told so by authentication handler max_retries times (default 3 times)"""
def __init__(self, base_url, authentication_handler=EmptyAuthenticationHandler(), max_retries=3, retry_delay_sec=3, ssl_config=SslConfig()):
self.base_url = base_url
self.authentication_handler = authentication_handler
self.max_retries = max_retries
self.retry_delay_sec = retry_delay_sec
self.ssl_config = ssl_config
logger.debug("Created http client")
def open_and_read(self, request_path=None):
response = self.open(request_path)
return response.read().decode(response.headers.get_content_charset() or "utf-8")
def open(self, request_path=None, retry=0):
request_headers = self.authentication_handler.request_headers()
try:
request = Request(self.__request_url__(request_path))
logger.debug("Request to %s", self.__request_url__(request_path))
for key, value in request_headers.items():
request.add_header(key, value)
logger.debug("Request headers: %s" % request.headers.keys()) # do not log contents to avoid leak
return self.__open__(request)
except HTTPError as e:
if e.code in (401,402,403,407,408) and retry < self.max_retries and self.authentication_handler.handle_forbidden(request_headers, e.code): # maybe authentication issue
return self.__retry__("Potential authentication status code %d" % e.code, request_path, retry);
elif e.code >= 500 and retry < self.max_retries: # retry server side error (may be temporary), max 3 attempts
return self.__retry__("Temporary error %d %s" % (e.code, e.reason), request_path, retry);
else:
self.__try__log_contents__(e)
raise e
except (URLError, ContentTooShortError) as e:
if retry < self.max_retries:
return self.__retry__("Error %s" % str(e), request_path, retry)
else:
raise e
def __retry__(self, text, request_path, retry):
logger.info("%s requesting %s, retry %s", text, self.__request_url__(request_path), retry)
sleep(retry * self.retry_delay_sec) # back off after first time
return self.open(request_path, retry + 1)
def __request_url__(self, request_path):
if request_path:
return self.base_url + request_path
else:
return self.base_url
def __open__(self, request):
if not self.ssl_config.ctx:
return urlopen(request)
return urlopen(request, context=self.ssl_config.ctx)
def __try__log_contents__(self, e):
try:
logger.info("Response heades: %s" % str(e.headers))
logger.info("Response contents %s" % e.file.read())
except:
pass # ignore
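# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): building a client
# with basic authentication and fetching a path. The URL, path and
# credentials below are placeholders, not values used anywhere in cimon.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    demo_client = create_http_client(base_url="https://example.org",
                                     username="demo-user",
                                     password="demo-password")
    try:
        print(demo_client.open_and_read("/status"))
    except (HTTPError, URLError) as e:
        print("request failed: %s" % e)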
|
|
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf_tests.factories import (
AuthUserFactory,
NodeFactory,
RegistrationFactory,
)
from rest_framework import exceptions
@pytest.mark.django_db
class LinkedRegistrationsTestCase:
@pytest.fixture()
def user_admin_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def registration(self):
return RegistrationFactory(is_public=True)
@pytest.fixture()
def node_public(self, registration):
node_public = NodeFactory(is_public=True)
node_public.add_pointer(registration, auth=Auth(node_public.creator))
node_public.save()
return node_public
@pytest.fixture()
def node_private(
self, user_admin_contrib, user_write_contrib,
user_read_contrib, registration):
node_private = NodeFactory(creator=user_admin_contrib)
node_private.add_contributor(
user_write_contrib,
auth=Auth(user_admin_contrib))
node_private.add_contributor(
user_read_contrib,
permissions=['read'],
auth=Auth(user_admin_contrib))
node_private.add_pointer(registration, auth=Auth(user_admin_contrib))
return node_private
@pytest.mark.django_db
class TestNodeLinkedRegistrationsList(LinkedRegistrationsTestCase):
@pytest.fixture()
def make_request(self, app):
def request(node_id=None, auth=None, expect_errors=False):
url = '/{}nodes/{}/linked_registrations/'.format(API_BASE, node_id)
if auth:
return app.get(url, auth=auth, expect_errors=expect_errors)
return app.get(url, expect_errors=expect_errors)
return request
def test_view_linked_registrations(
self, make_request, user_admin_contrib,
user_write_contrib, user_read_contrib,
user_non_contrib, registration,
node_public, node_private):
# test_public_node_unauthenticated_user_can_view_linked_registrations
res = make_request(node_id=node_public._id)
assert res.status_code == 200
assert res.json['data'][0]['id'] == registration._id
# test_private_node_admin_contributor_can_view_linked_registrations
res = make_request(
node_id=node_private._id,
auth=user_admin_contrib.auth)
assert res.status_code == 200
assert res.json['data'][0]['id'] == registration._id
# test_private_node_rw_contributor_can_view_linked_registrations
res = make_request(
node_id=node_private._id,
auth=user_write_contrib.auth)
assert res.status_code == 200
assert res.json['data'][0]['id'] == registration._id
# test_private_node_read_contributor_can_view_linked_registrations
res = make_request(
node_id=node_private._id,
auth=user_read_contrib.auth)
assert res.status_code == 200
assert res.json['data'][0]['id'] == registration._id
# test_private_node_non_contributor_cannot_view_linked_registrations
res = make_request(
node_id=node_private._id,
auth=user_non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_private_node_unauthenticated_user_cannot_view_linked_registrations
res = make_request(node_id=node_private._id, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
@pytest.mark.django_db
class TestNodeLinkedRegistrationsRelationshipRetrieve(
LinkedRegistrationsTestCase):
@pytest.fixture()
def make_request(self, app):
def request(node_id=None, auth=None, expect_errors=False):
url = '/{}nodes/{}/relationships/linked_registrations/'.format(
API_BASE, node_id)
if auth:
return app.get(url, auth=auth, expect_errors=expect_errors)
return app.get(url, expect_errors=expect_errors)
return request
    def test_can_view_linked_registrations_relationship(
self, make_request, registration, user_admin_contrib,
user_write_contrib, user_read_contrib, user_non_contrib,
node_public, node_private):
# test_public_node_unauthenticated_user_can_view_linked_registrations_relationship
res = make_request(node_id=node_public._id)
assert res.status_code == 200
assert res.json['data'][0]['id'] == registration._id
# test_private_node_admin_contributor_can_view_linked_registrations_relationship
res = make_request(
node_id=node_private._id,
auth=user_admin_contrib.auth)
assert res.status_code == 200
assert res.json['data'][0]['id'] == registration._id
# test_private_node_rw_contributor_can_view_linked_registrations_relationship
res = make_request(
node_id=node_private._id,
auth=user_write_contrib.auth)
assert res.status_code == 200
assert res.json['data'][0]['id'] == registration._id
# test_private_node_read_contributor_can_view_linked_registrations_relationship
res = make_request(
node_id=node_private._id,
auth=user_read_contrib.auth)
assert res.status_code == 200
assert res.json['data'][0]['id'] == registration._id
# test_private_node_non_contributor_cannot_view_linked_registrations_relationship
res = make_request(
node_id=node_private._id,
auth=user_non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_private_node_unauthenticated_user_cannot_view_linked_registrations_relationship
res = make_request(node_id=node_private._id, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
@pytest.mark.django_db
class TestNodeLinkedRegistrationsRelationshipCreate(
LinkedRegistrationsTestCase):
@pytest.fixture()
def make_payload(self):
def payload(registration_id=None):
return {
'data': [{
'type': 'linked_registrations',
'id': registration_id
}]
}
return payload
@pytest.fixture()
def make_request(self, app, make_payload):
def request(node_id=None, auth=None, reg_id=None, expect_errors=False):
url = '/{}nodes/{}/relationships/linked_registrations/'.format(
API_BASE, node_id)
if auth:
return app.post_json_api(
url, make_payload(registration_id=reg_id),
auth=auth, expect_errors=expect_errors)
return app.post_json_api(
url,
make_payload(registration_id=reg_id),
expect_errors=expect_errors)
return request
def test_admin_contributor_can_create_linked_registrations_relationship(
self, make_request, user_admin_contrib, node_private):
registration = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_admin_contrib.auth
)
assert res.status_code == 201
linked_registrations = [r['id'] for r in res.json['data']]
assert registration._id in linked_registrations
def test_rw_contributor_can_create_linked_registrations_relationship(
self, make_request, user_write_contrib, node_private):
registration = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_write_contrib.auth
)
assert res.status_code == 201
linked_registrations = [r['id'] for r in res.json['data']]
assert registration._id in linked_registrations
def test_cannot_create_linked_registrations_relationship(
self, make_request, user_admin_contrib, user_read_contrib,
user_non_contrib, node_private):
# test_read_contributor_cannot_create_linked_registrations_relationship
registration = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_read_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_non_contributor_cannot_create_linked_registrations_relationship
registration = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_non_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_unauthenticated_user_cannot_create_linked_registrations_relationship
registration = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
expect_errors=True
)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_cannot_create_linked_registrations_relationship_invalid_registration_guid
res = make_request(
node_id=node_private._id,
reg_id='abcde',
auth=user_admin_contrib.auth,
expect_errors=True
)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Node with id "abcde" was not found'
# test_cannot_create_linked_registration_relationship_to_private_registration_if_non_contributor
registration = RegistrationFactory()
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_admin_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
def test_create_linked_registrations_relationship_registration_already_in_linked_registrations_returns_no_content(
self, make_request, registration, node_private, user_admin_contrib):
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_admin_contrib.auth
)
assert res.status_code == 204
def test_can_create_linked_registration_relationship_to_private_registration_if_admin(
self, make_request, user_admin_contrib, node_private):
registration = RegistrationFactory(creator=user_admin_contrib)
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_admin_contrib.auth
)
assert res.status_code == 201
linked_registrations = [r['id'] for r in res.json['data']]
assert registration._id in linked_registrations
def test_can_create_linked_registration_relationship_to_private_registration_if_rw(
self, make_request, user_admin_contrib, node_private):
registration = RegistrationFactory()
registration.add_contributor(
user_admin_contrib,
auth=Auth(registration.creator))
registration.save()
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_admin_contrib.auth
)
assert res.status_code == 201
linked_registrations = [r['id'] for r in res.json['data']]
assert registration._id in linked_registrations
def test_can_create_linked_registration_relationship_to_private_registration_if_read_only(
self, make_request, user_admin_contrib, node_private):
registration = RegistrationFactory()
registration.add_contributor(
user_admin_contrib,
auth=Auth(registration.creator),
permissions=['read'])
registration.save()
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_admin_contrib.auth
)
assert res.status_code == 201
linked_registrations = [r['id'] for r in res.json['data']]
assert registration._id in linked_registrations
@pytest.mark.django_db
class TestNodeLinkedRegistrationsRelationshipUpdate(
LinkedRegistrationsTestCase):
@pytest.fixture()
def make_payload(self):
def payload(registration_id=None):
return {
'data': [{
'type': 'linked_registrations',
'id': registration_id
}]
}
return payload
@pytest.fixture()
def make_request(self, app, make_payload):
def request(node_id=None, auth=None, reg_id=None, expect_errors=False):
url = '/{}nodes/{}/relationships/linked_registrations/'.format(
API_BASE, node_id)
if auth:
return app.put_json_api(
url,
make_payload(registration_id=reg_id),
auth=auth, expect_errors=expect_errors)
return app.put_json_api(
url,
make_payload(registration_id=reg_id),
expect_errors=expect_errors)
return request
def test_admin_contributor_can_update_linked_registrations_relationship(
self, make_request, registration, user_admin_contrib, node_private):
registration_two = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration_two._id,
auth=user_admin_contrib.auth
)
assert res.status_code == 200
linked_registrations = [r['id'] for r in res.json['data']]
assert registration._id not in linked_registrations
assert registration_two._id in linked_registrations
def test_rw_contributor_can_update_linked_registrations_relationship(
self, make_request, registration, user_write_contrib, node_private):
registration_two = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration_two._id,
auth=user_write_contrib.auth
)
assert res.status_code == 200
linked_registrations = [r['id'] for r in res.json['data']]
assert registration._id not in linked_registrations
assert registration_two._id in linked_registrations
def test_empty_payload_removes_existing_linked_registrations(
self, app, user_admin_contrib, registration, node_private):
url = '/{}nodes/{}/relationships/linked_registrations/'.format(
API_BASE, node_private._id)
res = app.put_json_api(url, {}, auth=user_admin_contrib.auth)
assert res.status_code == 200
linked_registrations = [r['id'] for r in res.json['data']]
assert registration._id not in linked_registrations
def test_cannot_update_linked_registrations_relationship(
self, make_request, user_read_contrib, user_non_contrib, node_private):
# test_read_contributor_cannot_update_linked_registrations_relationship
registration = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_read_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_non_contributor_cannot_update_linked_registrations_relationship
registration = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
auth=user_non_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_unauthenticated_user_cannot_update_linked_registrations_relationship
registration = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
expect_errors=True
)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
@pytest.mark.django_db
class TestNodeLinkedRegistrationsRelationshipDelete(
LinkedRegistrationsTestCase):
@pytest.fixture()
def make_payload(self):
def payload(registration_id=None):
return {
'data': [{
'type': 'linked_registrations',
'id': registration_id
}]
}
return payload
@pytest.fixture()
def make_request(self, app, make_payload):
def request(node_id=None, auth=None, reg_id=None, expect_errors=False):
url = '/{}nodes/{}/relationships/linked_registrations/'.format(
API_BASE, node_id)
if auth:
return app.delete_json_api(
url, make_payload(reg_id),
auth=auth, expect_errors=expect_errors)
return app.delete_json_api(
url, make_payload(reg_id),
expect_errors=expect_errors)
return request
def test_admin_contributor_can_delete_linked_registrations_relationship(
self, make_request, registration, user_admin_contrib, node_private):
res = make_request(
node_id=node_private._id,
auth=user_admin_contrib.auth,
reg_id=registration._id
)
assert res.status_code == 204
def test_rw_contributor_can_delete_linked_registrations_relationship(
self, make_request, registration, user_write_contrib, node_private):
res = make_request(
node_id=node_private._id,
auth=user_write_contrib.auth,
reg_id=registration._id
)
assert res.status_code == 204
def test_linked_registrations_relationship_errors(
self, make_request, registration, user_admin_contrib,
user_read_contrib, user_non_contrib, node_private):
# test_read_contributor_cannot_delete_linked_registrations_relationship
res = make_request(
node_id=node_private._id,
auth=user_read_contrib.auth,
reg_id=registration._id,
expect_errors=True
)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_non_contributor_cannot_delete_linked_registrations_relationship
res = make_request(
node_id=node_private._id,
auth=user_non_contrib.auth,
reg_id=registration._id,
expect_errors=True
)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_unauthenticated_user_cannot_delete_linked_registrations_relationship
res = make_request(
node_id=node_private._id,
reg_id=registration._id,
expect_errors=True
)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_cannot_delete_linked_registrations_relationship_invalid_registration_guid
res = make_request(
node_id=node_private._id,
auth=user_admin_contrib.auth,
reg_id='abcde',
expect_errors=True
)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Pointer with id "abcde" not found in pointers list'
# test_cannot_delete_linked_registrations_relationship_registration_not_in_linked_registrations
registration_two = RegistrationFactory(is_public=True)
res = make_request(
node_id=node_private._id,
auth=user_admin_contrib.auth,
reg_id=registration_two._id,
expect_errors=True
)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Pointer with id "{}" not found in pointers list'.format(
registration_two._id)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests model set evaluation for some common use cases.
"""
import pytest
import numpy as np
from numpy.testing.utils import assert_allclose
from ..models import Polynomial1D, Polynomial2D
from ..fitting import LinearLSQFitter
from ..core import Model
from ..parameters import Parameter
x = np.arange(4)
xx = np.array([x, x + 10])
xxx = np.arange(24).reshape((3, 4, 2))
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
    # standard_broadcasting = False
inputs = ('x',)
outputs = ('x',)
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(x, coeff, e):
return x*coeff + e
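# Note: `model_set_axis` selects which axis of the parameter and input arrays
# indexes the individual models in a model set; evaluating with
# `model_set_axis=False` broadcasts a single input over all models in the set.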
def test_model_axis_1():
"""
Test that a model initialized with model_set_axis=1
can be evaluated with model_set_axis=False.
"""
model_axis = 1
n_models = 2
p1 = Polynomial1D(1, n_models=n_models, model_set_axis=model_axis)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = Polynomial1D(1, c0=2, c1=1)
t2 = Polynomial1D(1, c0=3, c1=2)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0], t1(x))
assert_allclose(y[:, 1], t2(x))
y = p1(xx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :], t1(xx))
assert_allclose(y[:, 1, :], t2(xx))
y = p1(xxx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :, :], t1(xxx))
assert_allclose(y[:, 1, :, :], t2(xxx))
def test_model_axis_2():
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
    p1 = Polynomial1D(1, c0=[[[1, 2, 3]]], c1=[[[10, 20, 30]]],
n_models=3, model_set_axis=2)
t1 = Polynomial1D(1, c0=1, c1=10)
t2 = Polynomial1D(1, c0=2, c1=20)
t3 = Polynomial1D(1, c0=3, c1=30)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape == (1, 4, 3)
assert_allclose(y[:, :, 0].flatten(), t1(x))
assert_allclose(y[:, :, 1].flatten(), t2(x))
assert_allclose(y[:, :, 2].flatten(), t3(x))
    p2 = Polynomial2D(1, c0_0=[[[0, 1, 2]]], c0_1=[[[3, 4, 5]]],
                      c1_0=[[[5, 6, 7]]], n_models=3, model_set_axis=2)
t1 = Polynomial2D(1, c0_0=0, c0_1=3, c1_0=5)
t2 = Polynomial2D(1, c0_0=1, c0_1=4, c1_0=6)
t3 = Polynomial2D(1, c0_0=2, c0_1=5, c1_0=7)
assert p2.c0_0.shape == ()
y = p2(x, x, model_set_axis=False)
assert y.shape == (1, 4, 3)
# These are columns along the 2nd axis.
assert_allclose(y[:, :, 0].flatten(), t1(x, x))
assert_allclose(y[:, :, 1].flatten(), t2(x, x))
assert_allclose(y[:, :, 2].flatten(), t3(x, x))
def test_axis_0():
"""
Test that a model initialized with model_set_axis=0
can be evaluated with model_set_axis=False.
"""
p1 = Polynomial1D(1, n_models=2, model_set_axis=0)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = Polynomial1D(1, c0=2, c1=1)
t2 = Polynomial1D(1, c0=3, c1=2)
with pytest.raises(ValueError):
p1(x)
y = p1(xx)
assert len(y) == 2
assert_allclose(y[0], t1(xx[0]))
assert_allclose(y[1], t2(xx[1]))
y = p1(x, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(x))
assert_allclose(y[1], t2(x))
y = p1(xx, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(xx))
assert_allclose(y[1], t2(xx))
y = p1(xxx, model_set_axis=False)
assert_allclose(y[0], t1(xxx))
assert_allclose(y[1], t2(xxx))
assert len(y) == 2
def test_negative_axis():
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
    t1 = Polynomial1D(1, c0=1, c1=3)
    t2 = Polynomial1D(1, c0=2, c1=4)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
xxt = xx.T
y = p1(xxt)
    assert_allclose(y[:, 0], t1(xxt[:, 0]))
    assert_allclose(y[:, 1], t2(xxt[:, 1]))
def test_shapes():
p2 = Polynomial1D(1, n_models=3, model_set_axis=2)
assert p2.c0.shape == ()
assert p2.c1.shape == ()
p1 = Polynomial1D(1, n_models=2, model_set_axis=1)
assert p1.c0.shape == ()
assert p1.c1.shape == ()
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
assert p1.c0.shape == ()
assert p1.c1.shape == ()
e1 = [1, 2]
e2 = [3, 4]
a1 = np.array([[10, 20], [30, 40]])
a2 = np.array([[50, 60], [70, 80]])
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=-1)
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
t = TParModel([[a1, a2]], [[e1, e2]], n_models=2, model_set_axis=1)
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
t = TParModel([a1, a2], e=[1, 2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2)
assert t.e.shape == ()
def test_linearlsqfitter():
"""
Issue #7159
"""
p = Polynomial1D(1, n_models=2, model_set_axis=1)
# Generate data for fitting 2 models and re-stack them along the last axis:
y = np.array([2*x+1, x+4])
y = np.rollaxis(y, 0, -1).T
f = LinearLSQFitter()
# This seems to fit the model_set correctly:
fit = f(p, x, y)
model_y = fit(x, model_set_axis=False)
m1 = Polynomial1D(1, c0=fit.c0[0][0], c1=fit.c1[0][0])
m2 = Polynomial1D(1, c0=fit.c0[0][1], c1=fit.c1[0][1])
assert_allclose(model_y[:, 0], m1(x))
assert_allclose(model_y[:, 1], m2(x))
def test_model_set_axis_outputs():
fitter = LinearLSQFitter()
model_set = Polynomial2D(1, n_models=2, model_set_axis=2)
y2, x2 = np.mgrid[: 5, : 5]
    # z = np.moveaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 2)
z = np.rollaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 3)
model = fitter(model_set, x2, y2, z)
res = model(x2, y2, model_set_axis=False)
assert z.shape == res.shape
# Test initializing with integer model_set_axis
# and evaluating with a different model_set_axis
model_set = Polynomial1D(1, c0=[1, 2], c1=[2, 3],
n_models=2, model_set_axis=0)
y0 = model_set(xx)
y1 = model_set(xx.T, model_set_axis=1)
assert_allclose(y0[0], y1[:, 0])
assert_allclose(y0[1], y1[:, 1])
model_set = Polynomial1D(1, c0=[[1, 2]], c1=[[2, 3]],
n_models=2, model_set_axis=1)
y0 = model_set(xx.T)
y1 = model_set(xx, model_set_axis=0)
assert_allclose(y0[:, 0], y1[0])
assert_allclose(y0[:, 1], y1[1])
with pytest.raises(ValueError):
model_set(x)
|
|
import graphene
import pytest
from unittest.mock import patch, Mock
from saleor.order import OrderEvents, OrderEventsEmails
from saleor.order.models import FulfillmentStatus
from tests.api.utils import get_graphql_content
CREATE_FULFILLMENT_QUERY = """
mutation fulfillOrder(
$order: ID, $lines: [FulfillmentLineInput]!, $tracking: String,
$notify: Boolean
) {
orderFulfillmentCreate(
order: $order,
input: {
lines: $lines, trackingNumber: $tracking,
notifyCustomer: $notify}
) {
errors {
field
message
}
fulfillment {
fulfillmentOrder
status
trackingNumber
lines {
id
}
}
}
}
"""
@patch(
'saleor.graphql.order.mutations.fulfillments.'
'send_fulfillment_confirmation')
def test_create_fulfillment(
mock_send_fulfillment_confirmation, staff_api_client, order_with_lines,
staff_user, permission_manage_orders):
order = order_with_lines
query = CREATE_FULFILLMENT_QUERY
order_id = graphene.Node.to_global_id('Order', order.id)
order_line = order.lines.first()
order_line_id = graphene.Node.to_global_id('OrderLine', order_line.id)
tracking = 'Flames tracking'
assert not order.events.all()
variables = {
'order': order_id,
'lines': [{'orderLineId': order_line_id, 'quantity': 1}],
'tracking': tracking, 'notify': True}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['orderFulfillmentCreate']['fulfillment']
assert data['fulfillmentOrder'] == 1
assert data['status'] == FulfillmentStatus.FULFILLED.upper()
assert data['trackingNumber'] == tracking
assert len(data['lines']) == 1
event_fulfillment, event_email_sent = order.events.all()
assert event_fulfillment.type == (
OrderEvents.FULFILLMENT_FULFILLED_ITEMS.value)
assert event_fulfillment.parameters == {'quantity': 1}
assert event_fulfillment.user == staff_user
assert event_email_sent.type == OrderEvents.EMAIL_SENT.value
assert event_email_sent.user == staff_user
assert event_email_sent.parameters == {
'email': order.user_email,
'email_type': OrderEventsEmails.FULFILLMENT.value}
assert mock_send_fulfillment_confirmation.delay.called
@patch(
'saleor.graphql.order.mutations.fulfillments.'
'send_fulfillment_confirmation')
def test_create_fulfillment_with_empty_quantity(
mock_send_fulfillment_confirmation, staff_api_client, order_with_lines,
staff_user, permission_manage_orders):
order = order_with_lines
query = CREATE_FULFILLMENT_QUERY
order_id = graphene.Node.to_global_id('Order', order.id)
order_lines = order.lines.all()
order_line_ids = [
graphene.Node.to_global_id(
'OrderLine', order_line.id) for order_line in order_lines]
tracking = 'Flames tracking'
assert not order.events.all()
variables = {
'order': order_id,
'lines': [{
'orderLineId': order_line_id,
'quantity': 1} for order_line_id in order_line_ids],
'tracking': tracking, 'notify': True}
variables['lines'][0]['quantity'] = 0
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['orderFulfillmentCreate']['fulfillment']
assert data['fulfillmentOrder'] == 1
assert data['status'] == FulfillmentStatus.FULFILLED.upper()
assert mock_send_fulfillment_confirmation.delay.called
@pytest.mark.parametrize(
'quantity, error_message, error_field',
(
(0, 'Total quantity must be larger than 0.', 'lines'),
(100, 'Only 3 items remaining to fulfill:', 'orderLineId')))
def test_create_fulfillment_not_sufficient_quantity(
staff_api_client, order_with_lines, staff_user, quantity,
error_message, error_field, permission_manage_orders):
query = CREATE_FULFILLMENT_QUERY
order_line = order_with_lines.lines.first()
order_line_id = graphene.Node.to_global_id('OrderLine', order_line.id)
variables = {
'order': graphene.Node.to_global_id('Order', order_with_lines.id),
'lines': [{'orderLineId': order_line_id, 'quantity': quantity}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['orderFulfillmentCreate']
assert data['errors']
assert data['errors'][0]['field'] == error_field
assert error_message in data['errors'][0]['message']
def test_create_fulfillment_with_invalid_input(
staff_api_client, order_with_lines, permission_manage_orders):
query = CREATE_FULFILLMENT_QUERY
variables = {
'order': graphene.Node.to_global_id('Order', order_with_lines.id),
'lines': [{'orderLineId': 'fake-orderline-id', 'quantity': 1}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['orderFulfillmentCreate']
assert data['errors']
assert data['errors'][0]['field'] == 'lines'
assert data['errors'][0]['message'] == (
'Could not resolve to a node with the global id list'
' of \'[\'fake-orderline-id\']\'.')
def test_fulfillment_update_tracking(
staff_api_client, fulfillment, permission_manage_orders):
query = """
mutation updateFulfillment($id: ID!, $tracking: String) {
orderFulfillmentUpdateTracking(
id: $id, input: {trackingNumber: $tracking}) {
fulfillment {
trackingNumber
}
}
}
"""
fulfillment_id = graphene.Node.to_global_id('Fulfillment', fulfillment.id)
tracking = 'stationary tracking'
variables = {'id': fulfillment_id, 'tracking': tracking}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['orderFulfillmentUpdateTracking']['fulfillment']
assert data['trackingNumber'] == tracking
def test_cancel_fulfillment_restock_items(
staff_api_client, fulfillment, staff_user, permission_manage_orders):
query = """
mutation cancelFulfillment($id: ID!, $restock: Boolean) {
orderFulfillmentCancel(id: $id, input: {restock: $restock}) {
fulfillment {
status
}
}
}
"""
fulfillment_id = graphene.Node.to_global_id('Fulfillment', fulfillment.id)
variables = {'id': fulfillment_id, 'restock': True}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['orderFulfillmentCancel']['fulfillment']
assert data['status'] == FulfillmentStatus.CANCELED.upper()
event_restocked_items = fulfillment.order.events.get()
assert event_restocked_items.type == (
OrderEvents.FULFILLMENT_RESTOCKED_ITEMS.value)
assert event_restocked_items.parameters == {
'quantity': fulfillment.get_total_quantity()}
assert event_restocked_items.user == staff_user
def test_cancel_fulfillment(
staff_api_client, fulfillment, staff_user, permission_manage_orders):
query = """
mutation cancelFulfillment($id: ID!, $restock: Boolean) {
orderFulfillmentCancel(id: $id, input: {restock: $restock}) {
fulfillment {
status
}
}
}
"""
fulfillment_id = graphene.Node.to_global_id('Fulfillment', fulfillment.id)
variables = {'id': fulfillment_id, 'restock': False}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['orderFulfillmentCancel']['fulfillment']
assert data['status'] == FulfillmentStatus.CANCELED.upper()
event_cancel_fulfillment = fulfillment.order.events.get()
assert event_cancel_fulfillment.type == (
OrderEvents.FULFILLMENT_CANCELED.value)
assert event_cancel_fulfillment.parameters == {
'composed_id': fulfillment.composed_id}
assert event_cancel_fulfillment.user == staff_user
@patch(
'saleor.graphql.order.mutations.fulfillments.'
'send_fulfillment_confirmation')
def test_create_digital_fulfillment(
mock_send_fulfillment_confirmation, digital_content, staff_api_client,
order_with_lines, staff_user, permission_manage_orders):
order = order_with_lines
query = CREATE_FULFILLMENT_QUERY
order_id = graphene.Node.to_global_id('Order', order.id)
order_line = order.lines.first()
order_line.variant = digital_content.product_variant
order_line.save()
second_line = order.lines.last()
first_line_id = graphene.Node.to_global_id('OrderLine', order_line.id)
second_line_id = graphene.Node.to_global_id('OrderLine', second_line.id)
tracking = 'Flames tracking'
assert not order.events.all()
variables = {
'order': order_id,
'lines': [
{'orderLineId': first_line_id, 'quantity': 1},
{'orderLineId': second_line_id, 'quantity': 1}],
'tracking': tracking, 'notify': True}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders])
get_graphql_content(response)
event_fulfillment, event_email_sent = order.events.all()
assert event_fulfillment.type == (
OrderEvents.FULFILLMENT_FULFILLED_ITEMS.value)
assert event_fulfillment.parameters == {'quantity': 2}
assert event_fulfillment.user == staff_user
assert event_email_sent.type == OrderEvents.EMAIL_SENT.value
assert event_email_sent.user == staff_user
assert event_email_sent.parameters == {
'email': order.user_email,
'email_type': OrderEventsEmails.FULFILLMENT.value}
digital_content.refresh_from_db()
assert digital_content.urls.count() == 1
assert digital_content.urls.all()[0].line == order_line
assert mock_send_fulfillment_confirmation.delay.called
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support classes for generating code from abstract syntax trees."""
try:
import _ast
except ImportError:
from genshi.template.ast24 import _ast, parse
else:
def parse(source, mode):
return compile(source, '', mode, _ast.PyCF_ONLY_AST)
from genshi.compat import IS_PYTHON2
__docformat__ = 'restructuredtext en'
class ASTCodeGenerator(object):
"""General purpose base class for AST transformations.
Every visitor method can be overridden to return an AST node that has been
altered or replaced in some way.
"""
def __init__(self, tree):
self.lines_info = []
self.line_info = None
self.code = ''
self.line = None
self.last = None
self.indent = 0
self.blame_stack = []
self.visit(tree)
if self.line.strip():
self.code += self.line + '\n'
self.lines_info.append(self.line_info)
self.line = None
self.line_info = None
def _change_indent(self, delta):
self.indent += delta
def _new_line(self):
if self.line is not None:
self.code += self.line + '\n'
self.lines_info.append(self.line_info)
self.line = ' '*4*self.indent
if len(self.blame_stack) == 0:
self.line_info = []
self.last = None
else:
self.line_info = [(0, self.blame_stack[-1],)]
self.last = self.blame_stack[-1]
def _write(self, s):
if len(s) == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
else:
if self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += s
def visit(self, node):
if node is None:
return None
if type(node) is tuple:
return tuple([self.visit(n) for n in node])
try:
self.blame_stack.append((node.lineno, node.col_offset,))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None)
if visitor is None:
raise Exception('Unhandled node type %r' % type(node))
ret = visitor(node)
if info:
self.blame_stack.pop()
return ret
def visit_Module(self, node):
for n in node.body:
self.visit(n)
visit_Interactive = visit_Module
visit_Suite = visit_Module
def visit_Expression(self, node):
self._new_line()
return self.visit(node.body)
# arguments = (expr* args, identifier? vararg,
# identifier? kwarg, expr* defaults)
def visit_arguments(self, node):
first = True
no_default_count = len(node.args) - len(node.defaults)
for i, arg in enumerate(node.args):
if not first:
self._write(', ')
else:
first = False
self.visit(arg)
if i >= no_default_count:
self._write('=')
self.visit(node.defaults[i - no_default_count])
if getattr(node, 'vararg', None):
if not first:
self._write(', ')
else:
first = False
self._write('*' + node.vararg)
if getattr(node, 'kwarg', None):
if not first:
self._write(', ')
else:
first = False
self._write('**' + node.kwarg)
if not IS_PYTHON2:
# In Python 3 arguments get a special node
def visit_arg(self, node):
self._write(node.arg)
# FunctionDef(identifier name, arguments args,
# stmt* body, expr* decorator_list)
def visit_FunctionDef(self, node):
        decorators = ()
if hasattr(node, 'decorator_list'):
decorators = getattr(node, 'decorator_list')
else: # different name in earlier Python versions
decorators = getattr(node, 'decorators', ())
for decorator in decorators:
self._new_line()
self._write('@')
self.visit(decorator)
self._new_line()
self._write('def ' + node.name + '(')
self.visit(node.args)
self._write('):')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
# ClassDef(identifier name, expr* bases, stmt* body)
def visit_ClassDef(self, node):
self._new_line()
self._write('class ' + node.name)
if node.bases:
self._write('(')
self.visit(node.bases[0])
for base in node.bases[1:]:
self._write(', ')
self.visit(base)
self._write(')')
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
# Return(expr? value)
def visit_Return(self, node):
self._new_line()
self._write('return')
if getattr(node, 'value', None):
self._write(' ')
self.visit(node.value)
# Delete(expr* targets)
def visit_Delete(self, node):
self._new_line()
self._write('del ')
self.visit(node.targets[0])
for target in node.targets[1:]:
self._write(', ')
self.visit(target)
# Assign(expr* targets, expr value)
def visit_Assign(self, node):
self._new_line()
for target in node.targets:
self.visit(target)
self._write(' = ')
self.visit(node.value)
# AugAssign(expr target, operator op, expr value)
def visit_AugAssign(self, node):
self._new_line()
self.visit(node.target)
self._write(' ' + self.binary_operators[node.op.__class__] + '= ')
self.visit(node.value)
# Print(expr? dest, expr* values, bool nl)
def visit_Print(self, node):
self._new_line()
self._write('print')
if getattr(node, 'dest', None):
self._write(' >> ')
self.visit(node.dest)
if getattr(node, 'values', None):
self._write(', ')
else:
self._write(' ')
if getattr(node, 'values', None):
self.visit(node.values[0])
for value in node.values[1:]:
self._write(', ')
self.visit(value)
if not node.nl:
self._write(',')
# For(expr target, expr iter, stmt* body, stmt* orelse)
def visit_For(self, node):
self._new_line()
self._write('for ')
self.visit(node.target)
self._write(' in ')
self.visit(node.iter)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'orelse', None):
self._new_line()
self._write('else:')
self._change_indent(1)
for statement in node.orelse:
self.visit(statement)
self._change_indent(-1)
# While(expr test, stmt* body, stmt* orelse)
def visit_While(self, node):
self._new_line()
self._write('while ')
self.visit(node.test)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'orelse', None):
self._new_line()
self._write('else:')
self._change_indent(1)
for statement in node.orelse:
self.visit(statement)
self._change_indent(-1)
# If(expr test, stmt* body, stmt* orelse)
def visit_If(self, node):
self._new_line()
self._write('if ')
self.visit(node.test)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'orelse', None):
self._new_line()
self._write('else:')
self._change_indent(1)
for statement in node.orelse:
self.visit(statement)
self._change_indent(-1)
# With(expr context_expr, expr? optional_vars, stmt* body)
# With(withitem* items, stmt* body) in Python >= 3.3
def visit_With(self, node):
self._new_line()
self._write('with ')
items = getattr(node, 'items', None)
first = True
if items is None:
items = [node]
for item in items:
if not first:
self._write(', ')
first = False
self.visit(item.context_expr)
if getattr(item, 'optional_vars', None):
self._write(' as ')
self.visit(item.optional_vars)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if IS_PYTHON2:
# Raise(expr? type, expr? inst, expr? tback)
def visit_Raise(self, node):
self._new_line()
self._write('raise')
if not node.type:
return
self._write(' ')
self.visit(node.type)
if not node.inst:
return
self._write(', ')
self.visit(node.inst)
if not node.tback:
return
self._write(', ')
self.visit(node.tback)
else:
        # Raise(expr? exc, expr? cause)
def visit_Raise(self, node):
self._new_line()
self._write('raise')
if not node.exc:
return
self._write(' ')
self.visit(node.exc)
if not node.cause:
return
self._write(' from ')
self.visit(node.cause)
# TryExcept(stmt* body, excepthandler* handlers, stmt* orelse)
def visit_TryExcept(self, node):
self._new_line()
self._write('try:')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'handlers', None):
for handler in node.handlers:
self.visit(handler)
self._new_line()
if getattr(node, 'orelse', None):
self._write('else:')
self._change_indent(1)
for statement in node.orelse:
self.visit(statement)
self._change_indent(-1)
# excepthandler = (expr? type, expr? name, stmt* body)
def visit_ExceptHandler(self, node):
self._new_line()
self._write('except')
if getattr(node, 'type', None):
self._write(' ')
self.visit(node.type)
if getattr(node, 'name', None):
self._write(', ')
self.visit(node.name)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
visit_excepthandler = visit_ExceptHandler
# TryFinally(stmt* body, stmt* finalbody)
def visit_TryFinally(self, node):
self._new_line()
self._write('try:')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'finalbody', None):
self._new_line()
self._write('finally:')
self._change_indent(1)
for statement in node.finalbody:
self.visit(statement)
self._change_indent(-1)
# New in Py3.3
# Try(stmt* body, excepthandler* handlers, stmt* orelse, stmt* finalbody)
def visit_Try(self, node):
self._new_line()
self._write('try:')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'handlers', None):
for handler in node.handlers:
self.visit(handler)
self._new_line()
if getattr(node, 'orelse', None):
self._write('else:')
self._change_indent(1)
for statement in node.orelse:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'finalbody', None):
self._new_line()
self._write('finally:')
self._change_indent(1)
for statement in node.finalbody:
self.visit(statement)
self._change_indent(-1)
# Assert(expr test, expr? msg)
def visit_Assert(self, node):
self._new_line()
self._write('assert ')
self.visit(node.test)
if getattr(node, 'msg', None):
self._write(', ')
self.visit(node.msg)
def visit_alias(self, node):
self._write(node.name)
if getattr(node, 'asname', None):
self._write(' as ')
self._write(node.asname)
# Import(alias* names)
def visit_Import(self, node):
self._new_line()
self._write('import ')
self.visit(node.names[0])
for name in node.names[1:]:
self._write(', ')
self.visit(name)
# ImportFrom(identifier module, alias* names, int? level)
def visit_ImportFrom(self, node):
self._new_line()
self._write('from ')
if node.level:
self._write('.' * node.level)
self._write(node.module)
self._write(' import ')
self.visit(node.names[0])
for name in node.names[1:]:
self._write(', ')
self.visit(name)
# Exec(expr body, expr? globals, expr? locals)
def visit_Exec(self, node):
self._new_line()
self._write('exec ')
self.visit(node.body)
if not node.globals:
return
self._write(', ')
self.visit(node.globals)
if not node.locals:
return
self._write(', ')
self.visit(node.locals)
# Global(identifier* names)
def visit_Global(self, node):
self._new_line()
self._write('global ')
self.visit(node.names[0])
for name in node.names[1:]:
self._write(', ')
self.visit(name)
# Expr(expr value)
def visit_Expr(self, node):
self._new_line()
self.visit(node.value)
# Pass
def visit_Pass(self, node):
self._new_line()
self._write('pass')
# Break
def visit_Break(self, node):
self._new_line()
self._write('break')
# Continue
def visit_Continue(self, node):
self._new_line()
self._write('continue')
### EXPRESSIONS
def with_parens(f):
def _f(self, node):
self._write('(')
f(self, node)
self._write(')')
return _f
bool_operators = {_ast.And: 'and', _ast.Or: 'or'}
# BoolOp(boolop op, expr* values)
@with_parens
def visit_BoolOp(self, node):
joiner = ' ' + self.bool_operators[node.op.__class__] + ' '
self.visit(node.values[0])
for value in node.values[1:]:
self._write(joiner)
self.visit(value)
binary_operators = {
_ast.Add: '+',
_ast.Sub: '-',
_ast.Mult: '*',
_ast.Div: '/',
_ast.Mod: '%',
_ast.Pow: '**',
_ast.LShift: '<<',
_ast.RShift: '>>',
_ast.BitOr: '|',
_ast.BitXor: '^',
_ast.BitAnd: '&',
_ast.FloorDiv: '//'
}
# BinOp(expr left, operator op, expr right)
@with_parens
def visit_BinOp(self, node):
self.visit(node.left)
self._write(' ' + self.binary_operators[node.op.__class__] + ' ')
self.visit(node.right)
unary_operators = {
_ast.Invert: '~',
_ast.Not: 'not',
_ast.UAdd: '+',
_ast.USub: '-',
}
# UnaryOp(unaryop op, expr operand)
def visit_UnaryOp(self, node):
self._write(self.unary_operators[node.op.__class__] + ' ')
self.visit(node.operand)
# Lambda(arguments args, expr body)
@with_parens
def visit_Lambda(self, node):
self._write('lambda ')
self.visit(node.args)
self._write(': ')
self.visit(node.body)
# IfExp(expr test, expr body, expr orelse)
@with_parens
def visit_IfExp(self, node):
self.visit(node.body)
self._write(' if ')
self.visit(node.test)
self._write(' else ')
self.visit(node.orelse)
# Dict(expr* keys, expr* values)
def visit_Dict(self, node):
self._write('{')
for key, value in zip(node.keys, node.values):
self.visit(key)
self._write(': ')
self.visit(value)
self._write(', ')
self._write('}')
# ListComp(expr elt, comprehension* generators)
def visit_ListComp(self, node):
self._write('[')
self.visit(node.elt)
for generator in node.generators:
# comprehension = (expr target, expr iter, expr* ifs)
self._write(' for ')
self.visit(generator.target)
self._write(' in ')
self.visit(generator.iter)
for ifexpr in generator.ifs:
self._write(' if ')
self.visit(ifexpr)
self._write(']')
# GeneratorExp(expr elt, comprehension* generators)
def visit_GeneratorExp(self, node):
self._write('(')
self.visit(node.elt)
for generator in node.generators:
# comprehension = (expr target, expr iter, expr* ifs)
self._write(' for ')
self.visit(generator.target)
self._write(' in ')
self.visit(generator.iter)
for ifexpr in generator.ifs:
self._write(' if ')
self.visit(ifexpr)
self._write(')')
# Yield(expr? value)
def visit_Yield(self, node):
self._write('yield')
if getattr(node, 'value', None):
self._write(' ')
self.visit(node.value)
comparision_operators = {
_ast.Eq: '==',
_ast.NotEq: '!=',
_ast.Lt: '<',
_ast.LtE: '<=',
_ast.Gt: '>',
_ast.GtE: '>=',
_ast.Is: 'is',
_ast.IsNot: 'is not',
_ast.In: 'in',
_ast.NotIn: 'not in',
}
# Compare(expr left, cmpop* ops, expr* comparators)
@with_parens
def visit_Compare(self, node):
self.visit(node.left)
for op, comparator in zip(node.ops, node.comparators):
self._write(' ' + self.comparision_operators[op.__class__] + ' ')
self.visit(comparator)
# Call(expr func, expr* args, keyword* keywords,
# expr? starargs, expr? kwargs)
def visit_Call(self, node):
self.visit(node.func)
self._write('(')
first = True
for arg in node.args:
if not first:
self._write(', ')
first = False
self.visit(arg)
for keyword in node.keywords:
if not first:
self._write(', ')
first = False
# keyword = (identifier arg, expr value)
self._write(keyword.arg)
self._write('=')
self.visit(keyword.value)
if getattr(node, 'starargs', None):
if not first:
self._write(', ')
first = False
self._write('*')
self.visit(node.starargs)
if getattr(node, 'kwargs', None):
if not first:
self._write(', ')
first = False
self._write('**')
self.visit(node.kwargs)
self._write(')')
# Repr(expr value)
def visit_Repr(self, node):
self._write('`')
self.visit(node.value)
self._write('`')
# Num(object n)
def visit_Num(self, node):
self._write(repr(node.n))
# Str(string s)
def visit_Str(self, node):
self._write(repr(node.s))
if not IS_PYTHON2:
# Bytes(bytes s)
def visit_Bytes(self, node):
self._write(repr(node.s))
# Attribute(expr value, identifier attr, expr_context ctx)
def visit_Attribute(self, node):
self.visit(node.value)
self._write('.')
self._write(node.attr)
# Subscript(expr value, slice slice, expr_context ctx)
def visit_Subscript(self, node):
self.visit(node.value)
self._write('[')
def _process_slice(node):
if isinstance(node, _ast.Ellipsis):
self._write('...')
elif isinstance(node, _ast.Slice):
                if getattr(node, 'lower', None):
self.visit(node.lower)
self._write(':')
if getattr(node, 'upper', None):
self.visit(node.upper)
if getattr(node, 'step', None):
self._write(':')
self.visit(node.step)
elif isinstance(node, _ast.Index):
self.visit(node.value)
elif isinstance(node, _ast.ExtSlice):
self.visit(node.dims[0])
for dim in node.dims[1:]:
self._write(', ')
self.visit(dim)
else:
                raise NotImplementedError('Slice type not implemented')
_process_slice(node.slice)
self._write(']')
# Name(identifier id, expr_context ctx)
def visit_Name(self, node):
self._write(node.id)
# List(expr* elts, expr_context ctx)
def visit_List(self, node):
self._write('[')
for elt in node.elts:
self.visit(elt)
self._write(', ')
self._write(']')
# Tuple(expr *elts, expr_context ctx)
def visit_Tuple(self, node):
self._write('(')
for elt in node.elts:
self.visit(elt)
self._write(', ')
self._write(')')
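# A minimal usage sketch, not part of the original module: it assumes nothing
# beyond the `parse` helper and the `ASTCodeGenerator` class defined above.
# The generator is driven entirely from its constructor, so round-tripping a
# snippet of source is just a matter of parsing it and reading back the
# accumulated `code` attribute; `lines_info` holds the (lineno, col_offset)
# blame records gathered for each emitted line. The sample source deliberately
# uses only Name/Assign/If nodes so it stays within the visitors defined above.
def _example_ast_codegen(source="a = b\nif a:\n    c = a\n"):
    tree = parse(source, 'exec')        # Module node
    generator = ASTCodeGenerator(tree)  # walks the tree on construction
    return generator.code, generator.lines_info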
class ASTTransformer(object):
"""General purpose base class for AST transformations.
Every visitor method can be overridden to return an AST node that has been
altered or replaced in some way.
"""
def visit(self, node):
if node is None:
return None
if type(node) is tuple:
return tuple([self.visit(n) for n in node])
visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None)
if visitor is None:
return node
return visitor(node)
def _clone(self, node):
clone = node.__class__()
for name in getattr(clone, '_attributes', ()):
try:
setattr(clone, name, getattr(node, name))
except AttributeError:
pass
for name in clone._fields:
try:
value = getattr(node, name)
except AttributeError:
pass
else:
if value is None:
pass
elif isinstance(value, list):
value = [self.visit(x) for x in value]
elif isinstance(value, tuple):
value = tuple(self.visit(x) for x in value)
else:
value = self.visit(value)
setattr(clone, name, value)
return clone
visit_Module = _clone
visit_Interactive = _clone
visit_Expression = _clone
visit_Suite = _clone
visit_FunctionDef = _clone
visit_ClassDef = _clone
visit_Return = _clone
visit_Delete = _clone
visit_Assign = _clone
visit_AugAssign = _clone
visit_Print = _clone
visit_For = _clone
visit_While = _clone
visit_If = _clone
visit_With = _clone
visit_Raise = _clone
visit_TryExcept = _clone
visit_TryFinally = _clone
visit_Try = _clone
visit_Assert = _clone
visit_ExceptHandler = _clone
visit_Import = _clone
visit_ImportFrom = _clone
visit_Exec = _clone
visit_Global = _clone
visit_Expr = _clone
# Pass, Break, Continue don't need to be copied
visit_BoolOp = _clone
visit_BinOp = _clone
visit_UnaryOp = _clone
visit_Lambda = _clone
visit_IfExp = _clone
visit_Dict = _clone
visit_ListComp = _clone
visit_GeneratorExp = _clone
visit_Yield = _clone
visit_Compare = _clone
visit_Call = _clone
visit_Repr = _clone
# Num, Str don't need to be copied
visit_Attribute = _clone
visit_Subscript = _clone
visit_Name = _clone
visit_List = _clone
visit_Tuple = _clone
visit_comprehension = _clone
visit_excepthandler = _clone
visit_arguments = _clone
visit_keyword = _clone
visit_alias = _clone
visit_Slice = _clone
visit_ExtSlice = _clone
visit_Index = _clone
del _clone
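# A hedged subclassing sketch, not part of the original module: it relies only
# on `parse`, `ASTCodeGenerator` and `ASTTransformer` as defined above. Node
# types with no visit_* method at all pass through `visit` untouched, while
# the stock visitors clone nodes as they recurse, so overriding a single
# method is enough to rewrite every matching node in the tree.
class _RenameNameExample(ASTTransformer):
    """Illustrative transformer: rename every Name node `old` to `new`."""
    def visit_Name(self, node):
        if node.id == 'old':
            node.id = 'new'
        return node
def _example_ast_transform(source="old = old + other\n"):
    # Transform the parsed tree, then regenerate source from the result.
    tree = _RenameNameExample().visit(parse(source, 'exec'))
    return ASTCodeGenerator(tree).code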
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
from google.appengine.datastore.entity_pb import EntityProto
class SchemaEntry(ProtocolBuffer.ProtocolMessage):
STRING = 1
INT32 = 2
BOOLEAN = 3
DOUBLE = 4
POINT = 5
USER = 6
REFERENCE = 7
_Type_NAMES = {
1: "STRING",
2: "INT32",
3: "BOOLEAN",
4: "DOUBLE",
5: "POINT",
6: "USER",
7: "REFERENCE",
}
def Type_Name(cls, x): return cls._Type_NAMES.get(x, "")
Type_Name = classmethod(Type_Name)
has_name_ = 0
name_ = ""
has_type_ = 0
type_ = 0
has_meaning_ = 0
meaning_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def type(self): return self.type_
def set_type(self, x):
self.has_type_ = 1
self.type_ = x
def clear_type(self):
if self.has_type_:
self.has_type_ = 0
self.type_ = 0
def has_type(self): return self.has_type_
def meaning(self): return self.meaning_
def set_meaning(self, x):
self.has_meaning_ = 1
self.meaning_ = x
def clear_meaning(self):
if self.has_meaning_:
self.has_meaning_ = 0
self.meaning_ = 0
def has_meaning(self): return self.has_meaning_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_type()): self.set_type(x.type())
if (x.has_meaning()): self.set_meaning(x.meaning())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_type_ != x.has_type_: return 0
if self.has_type_ and self.type_ != x.type_: return 0
if self.has_meaning_ != x.has_meaning_: return 0
if self.has_meaning_ and self.meaning_ != x.meaning_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
if (not self.has_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: type not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
n += self.lengthVarInt64(self.type_)
if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_)
return n + 2
def Clear(self):
self.clear_name()
self.clear_type()
self.clear_meaning()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
out.putVarInt32(16)
out.putVarInt32(self.type_)
if (self.has_meaning_):
out.putVarInt32(24)
out.putVarInt32(self.meaning_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 16:
self.set_type(d.getVarInt32())
continue
if tt == 24:
self.set_meaning(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatInt32(self.type_))
if self.has_meaning_: res+=prefix+("meaning: %s\n" % self.DebugFormatInt32(self.meaning_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
ktype = 2
kmeaning = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "type",
3: "meaning",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class SubscribeRequest(ProtocolBuffer.ProtocolMessage):
has_topic_ = 0
topic_ = ""
has_sub_id_ = 0
sub_id_ = ""
has_lease_duration_sec_ = 0
lease_duration_sec_ = 0.0
has_vanilla_query_ = 0
vanilla_query_ = ""
def __init__(self, contents=None):
self.schema_entry_ = []
if contents is not None: self.MergeFromString(contents)
def topic(self): return self.topic_
def set_topic(self, x):
self.has_topic_ = 1
self.topic_ = x
def clear_topic(self):
if self.has_topic_:
self.has_topic_ = 0
self.topic_ = ""
def has_topic(self): return self.has_topic_
def sub_id(self): return self.sub_id_
def set_sub_id(self, x):
self.has_sub_id_ = 1
self.sub_id_ = x
def clear_sub_id(self):
if self.has_sub_id_:
self.has_sub_id_ = 0
self.sub_id_ = ""
def has_sub_id(self): return self.has_sub_id_
def lease_duration_sec(self): return self.lease_duration_sec_
def set_lease_duration_sec(self, x):
self.has_lease_duration_sec_ = 1
self.lease_duration_sec_ = x
def clear_lease_duration_sec(self):
if self.has_lease_duration_sec_:
self.has_lease_duration_sec_ = 0
self.lease_duration_sec_ = 0.0
def has_lease_duration_sec(self): return self.has_lease_duration_sec_
def vanilla_query(self): return self.vanilla_query_
def set_vanilla_query(self, x):
self.has_vanilla_query_ = 1
self.vanilla_query_ = x
def clear_vanilla_query(self):
if self.has_vanilla_query_:
self.has_vanilla_query_ = 0
self.vanilla_query_ = ""
def has_vanilla_query(self): return self.has_vanilla_query_
def schema_entry_size(self): return len(self.schema_entry_)
def schema_entry_list(self): return self.schema_entry_
def schema_entry(self, i):
return self.schema_entry_[i]
def mutable_schema_entry(self, i):
return self.schema_entry_[i]
def add_schema_entry(self):
x = SchemaEntry()
self.schema_entry_.append(x)
return x
def clear_schema_entry(self):
self.schema_entry_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_topic()): self.set_topic(x.topic())
if (x.has_sub_id()): self.set_sub_id(x.sub_id())
if (x.has_lease_duration_sec()): self.set_lease_duration_sec(x.lease_duration_sec())
if (x.has_vanilla_query()): self.set_vanilla_query(x.vanilla_query())
for i in xrange(x.schema_entry_size()): self.add_schema_entry().CopyFrom(x.schema_entry(i))
def Equals(self, x):
if x is self: return 1
if self.has_topic_ != x.has_topic_: return 0
if self.has_topic_ and self.topic_ != x.topic_: return 0
if self.has_sub_id_ != x.has_sub_id_: return 0
if self.has_sub_id_ and self.sub_id_ != x.sub_id_: return 0
if self.has_lease_duration_sec_ != x.has_lease_duration_sec_: return 0
if self.has_lease_duration_sec_ and self.lease_duration_sec_ != x.lease_duration_sec_: return 0
if self.has_vanilla_query_ != x.has_vanilla_query_: return 0
if self.has_vanilla_query_ and self.vanilla_query_ != x.vanilla_query_: return 0
if len(self.schema_entry_) != len(x.schema_entry_): return 0
for e1, e2 in zip(self.schema_entry_, x.schema_entry_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_topic_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: topic not set.')
if (not self.has_sub_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: sub_id not set.')
if (not self.has_lease_duration_sec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: lease_duration_sec not set.')
if (not self.has_vanilla_query_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: vanilla_query not set.')
for p in self.schema_entry_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.topic_))
n += self.lengthString(len(self.sub_id_))
n += self.lengthString(len(self.vanilla_query_))
n += 1 * len(self.schema_entry_)
for i in xrange(len(self.schema_entry_)): n += self.lengthString(self.schema_entry_[i].ByteSize())
return n + 12
def Clear(self):
self.clear_topic()
self.clear_sub_id()
self.clear_lease_duration_sec()
self.clear_vanilla_query()
self.clear_schema_entry()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
out.putVarInt32(18)
out.putPrefixedString(self.sub_id_)
out.putVarInt32(25)
out.putDouble(self.lease_duration_sec_)
out.putVarInt32(34)
out.putPrefixedString(self.vanilla_query_)
for i in xrange(len(self.schema_entry_)):
out.putVarInt32(42)
out.putVarInt32(self.schema_entry_[i].ByteSize())
self.schema_entry_[i].OutputUnchecked(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic(d.getPrefixedString())
continue
if tt == 18:
self.set_sub_id(d.getPrefixedString())
continue
if tt == 25:
self.set_lease_duration_sec(d.getDouble())
continue
if tt == 34:
self.set_vanilla_query(d.getPrefixedString())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_schema_entry().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_))
if self.has_sub_id_: res+=prefix+("sub_id: %s\n" % self.DebugFormatString(self.sub_id_))
if self.has_lease_duration_sec_: res+=prefix+("lease_duration_sec: %s\n" % self.DebugFormat(self.lease_duration_sec_))
if self.has_vanilla_query_: res+=prefix+("vanilla_query: %s\n" % self.DebugFormatString(self.vanilla_query_))
cnt=0
for e in self.schema_entry_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("schema_entry%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
ksub_id = 2
klease_duration_sec = 3
kvanilla_query = 4
kschema_entry = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
2: "sub_id",
3: "lease_duration_sec",
4: "vanilla_query",
5: "schema_entry",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class SubscribeResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n + 0
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class UnsubscribeRequest(ProtocolBuffer.ProtocolMessage):
has_topic_ = 0
topic_ = ""
has_sub_id_ = 0
sub_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def topic(self): return self.topic_
def set_topic(self, x):
self.has_topic_ = 1
self.topic_ = x
def clear_topic(self):
if self.has_topic_:
self.has_topic_ = 0
self.topic_ = ""
def has_topic(self): return self.has_topic_
def sub_id(self): return self.sub_id_
def set_sub_id(self, x):
self.has_sub_id_ = 1
self.sub_id_ = x
def clear_sub_id(self):
if self.has_sub_id_:
self.has_sub_id_ = 0
self.sub_id_ = ""
def has_sub_id(self): return self.has_sub_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_topic()): self.set_topic(x.topic())
if (x.has_sub_id()): self.set_sub_id(x.sub_id())
def Equals(self, x):
if x is self: return 1
if self.has_topic_ != x.has_topic_: return 0
if self.has_topic_ and self.topic_ != x.topic_: return 0
if self.has_sub_id_ != x.has_sub_id_: return 0
if self.has_sub_id_ and self.sub_id_ != x.sub_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_topic_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: topic not set.')
if (not self.has_sub_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: sub_id not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.topic_))
n += self.lengthString(len(self.sub_id_))
return n + 2
def Clear(self):
self.clear_topic()
self.clear_sub_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
out.putVarInt32(18)
out.putPrefixedString(self.sub_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic(d.getPrefixedString())
continue
if tt == 18:
self.set_sub_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_))
if self.has_sub_id_: res+=prefix+("sub_id: %s\n" % self.DebugFormatString(self.sub_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
ksub_id = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
2: "sub_id",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class UnsubscribeResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n + 0
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class SubscriptionRecord(ProtocolBuffer.ProtocolMessage):
OK = 0
PENDING = 1
ERROR = 2
_State_NAMES = {
0: "OK",
1: "PENDING",
2: "ERROR",
}
def State_Name(cls, x): return cls._State_NAMES.get(x, "")
State_Name = classmethod(State_Name)
has_id_ = 0
id_ = ""
has_vanilla_query_ = 0
vanilla_query_ = ""
has_expiration_time_sec_ = 0
expiration_time_sec_ = 0.0
has_state_ = 0
state_ = 0
has_error_message_ = 0
error_message_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def id(self): return self.id_
def set_id(self, x):
self.has_id_ = 1
self.id_ = x
def clear_id(self):
if self.has_id_:
self.has_id_ = 0
self.id_ = ""
def has_id(self): return self.has_id_
def vanilla_query(self): return self.vanilla_query_
def set_vanilla_query(self, x):
self.has_vanilla_query_ = 1
self.vanilla_query_ = x
def clear_vanilla_query(self):
if self.has_vanilla_query_:
self.has_vanilla_query_ = 0
self.vanilla_query_ = ""
def has_vanilla_query(self): return self.has_vanilla_query_
def expiration_time_sec(self): return self.expiration_time_sec_
def set_expiration_time_sec(self, x):
self.has_expiration_time_sec_ = 1
self.expiration_time_sec_ = x
def clear_expiration_time_sec(self):
if self.has_expiration_time_sec_:
self.has_expiration_time_sec_ = 0
self.expiration_time_sec_ = 0.0
def has_expiration_time_sec(self): return self.has_expiration_time_sec_
def state(self): return self.state_
def set_state(self, x):
self.has_state_ = 1
self.state_ = x
def clear_state(self):
if self.has_state_:
self.has_state_ = 0
self.state_ = 0
def has_state(self): return self.has_state_
def error_message(self): return self.error_message_
def set_error_message(self, x):
self.has_error_message_ = 1
self.error_message_ = x
def clear_error_message(self):
if self.has_error_message_:
self.has_error_message_ = 0
self.error_message_ = ""
def has_error_message(self): return self.has_error_message_
def MergeFrom(self, x):
assert x is not self
if (x.has_id()): self.set_id(x.id())
if (x.has_vanilla_query()): self.set_vanilla_query(x.vanilla_query())
if (x.has_expiration_time_sec()): self.set_expiration_time_sec(x.expiration_time_sec())
if (x.has_state()): self.set_state(x.state())
if (x.has_error_message()): self.set_error_message(x.error_message())
def Equals(self, x):
if x is self: return 1
if self.has_id_ != x.has_id_: return 0
if self.has_id_ and self.id_ != x.id_: return 0
if self.has_vanilla_query_ != x.has_vanilla_query_: return 0
if self.has_vanilla_query_ and self.vanilla_query_ != x.vanilla_query_: return 0
if self.has_expiration_time_sec_ != x.has_expiration_time_sec_: return 0
if self.has_expiration_time_sec_ and self.expiration_time_sec_ != x.expiration_time_sec_: return 0
if self.has_state_ != x.has_state_: return 0
if self.has_state_ and self.state_ != x.state_: return 0
if self.has_error_message_ != x.has_error_message_: return 0
if self.has_error_message_ and self.error_message_ != x.error_message_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: id not set.')
if (not self.has_vanilla_query_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: vanilla_query not set.')
if (not self.has_expiration_time_sec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: expiration_time_sec not set.')
if (not self.has_state_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: state not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.id_))
n += self.lengthString(len(self.vanilla_query_))
n += self.lengthVarInt64(self.state_)
if (self.has_error_message_): n += 1 + self.lengthString(len(self.error_message_))
return n + 12
def Clear(self):
self.clear_id()
self.clear_vanilla_query()
self.clear_expiration_time_sec()
self.clear_state()
self.clear_error_message()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.id_)
out.putVarInt32(18)
out.putPrefixedString(self.vanilla_query_)
out.putVarInt32(25)
out.putDouble(self.expiration_time_sec_)
out.putVarInt32(32)
out.putVarInt32(self.state_)
if (self.has_error_message_):
out.putVarInt32(42)
out.putPrefixedString(self.error_message_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_id(d.getPrefixedString())
continue
if tt == 18:
self.set_vanilla_query(d.getPrefixedString())
continue
if tt == 25:
self.set_expiration_time_sec(d.getDouble())
continue
if tt == 32:
self.set_state(d.getVarInt32())
continue
if tt == 42:
self.set_error_message(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatString(self.id_))
if self.has_vanilla_query_: res+=prefix+("vanilla_query: %s\n" % self.DebugFormatString(self.vanilla_query_))
if self.has_expiration_time_sec_: res+=prefix+("expiration_time_sec: %s\n" % self.DebugFormat(self.expiration_time_sec_))
if self.has_state_: res+=prefix+("state: %s\n" % self.DebugFormatInt32(self.state_))
if self.has_error_message_: res+=prefix+("error_message: %s\n" % self.DebugFormatString(self.error_message_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kid = 1
kvanilla_query = 2
kexpiration_time_sec = 3
kstate = 4
kerror_message = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "id",
2: "vanilla_query",
3: "expiration_time_sec",
4: "state",
5: "error_message",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class ListSubscriptionsRequest(ProtocolBuffer.ProtocolMessage):
has_topic_ = 0
topic_ = ""
has_max_results_ = 0
max_results_ = 1000
has_expires_before_ = 0
expires_before_ = 0
has_subscription_id_start_ = 0
subscription_id_start_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def topic(self): return self.topic_
def set_topic(self, x):
self.has_topic_ = 1
self.topic_ = x
def clear_topic(self):
if self.has_topic_:
self.has_topic_ = 0
self.topic_ = ""
def has_topic(self): return self.has_topic_
def max_results(self): return self.max_results_
def set_max_results(self, x):
self.has_max_results_ = 1
self.max_results_ = x
def clear_max_results(self):
if self.has_max_results_:
self.has_max_results_ = 0
self.max_results_ = 1000
def has_max_results(self): return self.has_max_results_
def expires_before(self): return self.expires_before_
def set_expires_before(self, x):
self.has_expires_before_ = 1
self.expires_before_ = x
def clear_expires_before(self):
if self.has_expires_before_:
self.has_expires_before_ = 0
self.expires_before_ = 0
def has_expires_before(self): return self.has_expires_before_
def subscription_id_start(self): return self.subscription_id_start_
def set_subscription_id_start(self, x):
self.has_subscription_id_start_ = 1
self.subscription_id_start_ = x
def clear_subscription_id_start(self):
if self.has_subscription_id_start_:
self.has_subscription_id_start_ = 0
self.subscription_id_start_ = ""
def has_subscription_id_start(self): return self.has_subscription_id_start_
def MergeFrom(self, x):
assert x is not self
if (x.has_topic()): self.set_topic(x.topic())
if (x.has_max_results()): self.set_max_results(x.max_results())
if (x.has_expires_before()): self.set_expires_before(x.expires_before())
if (x.has_subscription_id_start()): self.set_subscription_id_start(x.subscription_id_start())
def Equals(self, x):
if x is self: return 1
if self.has_topic_ != x.has_topic_: return 0
if self.has_topic_ and self.topic_ != x.topic_: return 0
if self.has_max_results_ != x.has_max_results_: return 0
if self.has_max_results_ and self.max_results_ != x.max_results_: return 0
if self.has_expires_before_ != x.has_expires_before_: return 0
if self.has_expires_before_ and self.expires_before_ != x.expires_before_: return 0
if self.has_subscription_id_start_ != x.has_subscription_id_start_: return 0
if self.has_subscription_id_start_ and self.subscription_id_start_ != x.subscription_id_start_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_topic_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: topic not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.topic_))
if (self.has_max_results_): n += 1 + self.lengthVarInt64(self.max_results_)
if (self.has_expires_before_): n += 1 + self.lengthVarInt64(self.expires_before_)
if (self.has_subscription_id_start_): n += 1 + self.lengthString(len(self.subscription_id_start_))
return n + 1
def Clear(self):
self.clear_topic()
self.clear_max_results()
self.clear_expires_before()
self.clear_subscription_id_start()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
if (self.has_max_results_):
out.putVarInt32(16)
out.putVarInt64(self.max_results_)
if (self.has_expires_before_):
out.putVarInt32(24)
out.putVarInt64(self.expires_before_)
if (self.has_subscription_id_start_):
out.putVarInt32(34)
out.putPrefixedString(self.subscription_id_start_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic(d.getPrefixedString())
continue
if tt == 16:
self.set_max_results(d.getVarInt64())
continue
if tt == 24:
self.set_expires_before(d.getVarInt64())
continue
if tt == 34:
self.set_subscription_id_start(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_))
if self.has_max_results_: res+=prefix+("max_results: %s\n" % self.DebugFormatInt64(self.max_results_))
if self.has_expires_before_: res+=prefix+("expires_before: %s\n" % self.DebugFormatInt64(self.expires_before_))
if self.has_subscription_id_start_: res+=prefix+("subscription_id_start: %s\n" % self.DebugFormatString(self.subscription_id_start_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
kmax_results = 2
kexpires_before = 3
ksubscription_id_start = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
2: "max_results",
3: "expires_before",
4: "subscription_id_start",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class ListSubscriptionsResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.subscription_ = []
if contents is not None: self.MergeFromString(contents)
def subscription_size(self): return len(self.subscription_)
def subscription_list(self): return self.subscription_
def subscription(self, i):
return self.subscription_[i]
def mutable_subscription(self, i):
return self.subscription_[i]
def add_subscription(self):
x = SubscriptionRecord()
self.subscription_.append(x)
return x
def clear_subscription(self):
self.subscription_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.subscription_size()): self.add_subscription().CopyFrom(x.subscription(i))
def Equals(self, x):
if x is self: return 1
if len(self.subscription_) != len(x.subscription_): return 0
for e1, e2 in zip(self.subscription_, x.subscription_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.subscription_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.subscription_)
for i in xrange(len(self.subscription_)): n += self.lengthString(self.subscription_[i].ByteSize())
return n + 0
def Clear(self):
self.clear_subscription()
def OutputUnchecked(self, out):
for i in xrange(len(self.subscription_)):
out.putVarInt32(10)
out.putVarInt32(self.subscription_[i].ByteSize())
self.subscription_[i].OutputUnchecked(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_subscription().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.subscription_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("subscription%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ksubscription = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "subscription",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class MatchRequest(ProtocolBuffer.ProtocolMessage):
DICT = 1
ENTITY = 2
MODEL = 3
_PythonDocumentClass_NAMES = {
1: "DICT",
2: "ENTITY",
3: "MODEL",
}
def PythonDocumentClass_Name(cls, x): return cls._PythonDocumentClass_NAMES.get(x, "")
PythonDocumentClass_Name = classmethod(PythonDocumentClass_Name)
has_topic_ = 0
topic_ = ""
has_document_ = 0
has_result_batch_size_ = 0
result_batch_size_ = 0
has_result_task_queue_ = 0
result_task_queue_ = ""
has_result_relative_url_ = 0
result_relative_url_ = ""
has_result_key_ = 0
result_key_ = ""
has_result_python_document_class_ = 0
result_python_document_class_ = 0
def __init__(self, contents=None):
self.document_ = EntityProto()
if contents is not None: self.MergeFromString(contents)
def topic(self): return self.topic_
def set_topic(self, x):
self.has_topic_ = 1
self.topic_ = x
def clear_topic(self):
if self.has_topic_:
self.has_topic_ = 0
self.topic_ = ""
def has_topic(self): return self.has_topic_
def document(self): return self.document_
def mutable_document(self): self.has_document_ = 1; return self.document_
def clear_document(self):self.has_document_ = 0; self.document_.Clear()
def has_document(self): return self.has_document_
def result_batch_size(self): return self.result_batch_size_
def set_result_batch_size(self, x):
self.has_result_batch_size_ = 1
self.result_batch_size_ = x
def clear_result_batch_size(self):
if self.has_result_batch_size_:
self.has_result_batch_size_ = 0
self.result_batch_size_ = 0
def has_result_batch_size(self): return self.has_result_batch_size_
def result_task_queue(self): return self.result_task_queue_
def set_result_task_queue(self, x):
self.has_result_task_queue_ = 1
self.result_task_queue_ = x
def clear_result_task_queue(self):
if self.has_result_task_queue_:
self.has_result_task_queue_ = 0
self.result_task_queue_ = ""
def has_result_task_queue(self): return self.has_result_task_queue_
def result_relative_url(self): return self.result_relative_url_
def set_result_relative_url(self, x):
self.has_result_relative_url_ = 1
self.result_relative_url_ = x
def clear_result_relative_url(self):
if self.has_result_relative_url_:
self.has_result_relative_url_ = 0
self.result_relative_url_ = ""
def has_result_relative_url(self): return self.has_result_relative_url_
def result_key(self): return self.result_key_
def set_result_key(self, x):
self.has_result_key_ = 1
self.result_key_ = x
def clear_result_key(self):
if self.has_result_key_:
self.has_result_key_ = 0
self.result_key_ = ""
def has_result_key(self): return self.has_result_key_
def result_python_document_class(self): return self.result_python_document_class_
def set_result_python_document_class(self, x):
self.has_result_python_document_class_ = 1
self.result_python_document_class_ = x
def clear_result_python_document_class(self):
if self.has_result_python_document_class_:
self.has_result_python_document_class_ = 0
self.result_python_document_class_ = 0
def has_result_python_document_class(self): return self.has_result_python_document_class_
def MergeFrom(self, x):
assert x is not self
if (x.has_topic()): self.set_topic(x.topic())
if (x.has_document()): self.mutable_document().MergeFrom(x.document())
if (x.has_result_batch_size()): self.set_result_batch_size(x.result_batch_size())
if (x.has_result_task_queue()): self.set_result_task_queue(x.result_task_queue())
if (x.has_result_relative_url()): self.set_result_relative_url(x.result_relative_url())
if (x.has_result_key()): self.set_result_key(x.result_key())
if (x.has_result_python_document_class()): self.set_result_python_document_class(x.result_python_document_class())
def Equals(self, x):
if x is self: return 1
if self.has_topic_ != x.has_topic_: return 0
if self.has_topic_ and self.topic_ != x.topic_: return 0
if self.has_document_ != x.has_document_: return 0
if self.has_document_ and self.document_ != x.document_: return 0
if self.has_result_batch_size_ != x.has_result_batch_size_: return 0
if self.has_result_batch_size_ and self.result_batch_size_ != x.result_batch_size_: return 0
if self.has_result_task_queue_ != x.has_result_task_queue_: return 0
if self.has_result_task_queue_ and self.result_task_queue_ != x.result_task_queue_: return 0
if self.has_result_relative_url_ != x.has_result_relative_url_: return 0
if self.has_result_relative_url_ and self.result_relative_url_ != x.result_relative_url_: return 0
if self.has_result_key_ != x.has_result_key_: return 0
if self.has_result_key_ and self.result_key_ != x.result_key_: return 0
if self.has_result_python_document_class_ != x.has_result_python_document_class_: return 0
if self.has_result_python_document_class_ and self.result_python_document_class_ != x.result_python_document_class_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_topic_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: topic not set.')
if (not self.has_document_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: document not set.')
elif not self.document_.IsInitialized(debug_strs): initialized = 0
if (not self.has_result_batch_size_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result_batch_size not set.')
if (not self.has_result_task_queue_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result_task_queue not set.')
if (not self.has_result_relative_url_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result_relative_url not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.topic_))
n += self.lengthString(self.document_.ByteSize())
n += self.lengthVarInt64(self.result_batch_size_)
n += self.lengthString(len(self.result_task_queue_))
n += self.lengthString(len(self.result_relative_url_))
if (self.has_result_key_): n += 1 + self.lengthString(len(self.result_key_))
if (self.has_result_python_document_class_): n += 1 + self.lengthVarInt64(self.result_python_document_class_)
return n + 5
def Clear(self):
self.clear_topic()
self.clear_document()
self.clear_result_batch_size()
self.clear_result_task_queue()
self.clear_result_relative_url()
self.clear_result_key()
self.clear_result_python_document_class()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
out.putVarInt32(18)
out.putVarInt32(self.document_.ByteSize())
self.document_.OutputUnchecked(out)
out.putVarInt32(24)
out.putVarInt32(self.result_batch_size_)
out.putVarInt32(34)
out.putPrefixedString(self.result_task_queue_)
out.putVarInt32(42)
out.putPrefixedString(self.result_relative_url_)
if (self.has_result_key_):
out.putVarInt32(50)
out.putPrefixedString(self.result_key_)
if (self.has_result_python_document_class_):
out.putVarInt32(56)
out.putVarInt32(self.result_python_document_class_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_document().TryMerge(tmp)
continue
if tt == 24:
self.set_result_batch_size(d.getVarInt32())
continue
if tt == 34:
self.set_result_task_queue(d.getPrefixedString())
continue
if tt == 42:
self.set_result_relative_url(d.getPrefixedString())
continue
if tt == 50:
self.set_result_key(d.getPrefixedString())
continue
if tt == 56:
self.set_result_python_document_class(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_))
if self.has_document_:
res+=prefix+"document <\n"
res+=self.document_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_result_batch_size_: res+=prefix+("result_batch_size: %s\n" % self.DebugFormatInt32(self.result_batch_size_))
if self.has_result_task_queue_: res+=prefix+("result_task_queue: %s\n" % self.DebugFormatString(self.result_task_queue_))
if self.has_result_relative_url_: res+=prefix+("result_relative_url: %s\n" % self.DebugFormatString(self.result_relative_url_))
if self.has_result_key_: res+=prefix+("result_key: %s\n" % self.DebugFormatString(self.result_key_))
if self.has_result_python_document_class_: res+=prefix+("result_python_document_class: %s\n" % self.DebugFormatInt32(self.result_python_document_class_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
kdocument = 2
kresult_batch_size = 3
kresult_task_queue = 4
kresult_relative_url = 5
kresult_key = 6
kresult_python_document_class = 7
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
2: "document",
3: "result_batch_size",
4: "result_task_queue",
5: "result_relative_url",
6: "result_key",
7: "result_python_document_class",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.NUMERIC,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class MatchResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n + 0
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
__all__ = ['SchemaEntry','SubscribeRequest','SubscribeResponse','UnsubscribeRequest','UnsubscribeResponse','SubscriptionRecord','ListSubscriptionsRequest','ListSubscriptionsResponse','MatchRequest','MatchResponse']
|
|
"""Low-level utilities used internally by NDB.
These are not meant for use by code outside NDB.
"""
import logging
import os
import sys
import threading
__all__ = []
DEBUG = True # Set to False for some speedups
def logging_debug(*args):
# NOTE: If you want to see debug messages, set the logging level
# manually to logging.DEBUG - 1; or for tests use -v -v -v (see below).
if DEBUG and logging.getLogger().level < logging.DEBUG:
logging.debug(*args)
def wrapping(wrapped):
# A decorator to decorate a decorator's wrapper. Following the lead
# of Twisted and Monocle, this is supposed to make debugging heavily
# decorated code easier. We'll see...
# TODO: Evaluate; so far it hasn't helped, and it has hurt some.
def wrapping_wrapper(wrapper):
try:
wrapper.__wrapped__ = wrapped
wrapper.__name__ = wrapped.__name__
wrapper.__doc__ = wrapped.__doc__
wrapper.__dict__.update(wrapped.__dict__)
except Exception:
pass
return wrapper
return wrapping_wrapper
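# Hedged usage sketch (not from the original module): `wrapping` is applied to
# the wrapper function inside a decorator so the decorated function keeps its
# name, docstring and a __wrapped__ pointer. `traced` is a hypothetical
# decorator used only for illustration.
def traced(wrapped):
  @wrapping(wrapped)
  def traced_wrapper(*args, **kwds):
    logging_debug('calling %s', wrapped.__name__)
    return wrapped(*args, **kwds)
  return traced_wrapper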
# Define a base class for classes that need to be thread-local.
# This is pretty subtle; we want to use threading.local if threading
# is supported, but object if it is not.
if threading.local.__module__ == 'thread':
logging_debug('Using threading.local')
threading_local = threading.local
else:
logging_debug('Not using threading.local')
threading_local = object
def get_stack(limit=10):
# Return a list of strings showing where the current frame was called.
if not DEBUG:
return ()
frame = sys._getframe(1) # Always skip get_stack() itself.
lines = []
while len(lines) < limit and frame is not None:
f_locals = frame.f_locals
ndb_debug = f_locals.get('__ndb_debug__')
if ndb_debug != 'SKIP':
line = frame_info(frame)
if ndb_debug is not None:
line += ' # ' + str(ndb_debug)
lines.append(line)
frame = frame.f_back
return lines
def func_info(func, lineno=None):
if not DEBUG:
return None
func = getattr(func, '__wrapped__', func)
code = getattr(func, 'func_code', None)
return code_info(code, lineno)
def gen_info(gen):
if not DEBUG:
return None
frame = gen.gi_frame
if gen.gi_running:
prefix = 'running generator '
elif frame:
if frame.f_lasti < 0:
prefix = 'initial generator '
else:
prefix = 'suspended generator '
else:
prefix = 'terminated generator '
if frame:
return prefix + frame_info(frame)
code = getattr(gen, 'gi_code', None)
if code:
return prefix + code_info(code)
return prefix + hex(id(gen))
def frame_info(frame):
if not DEBUG:
return None
return code_info(frame.f_code, frame.f_lineno)
def code_info(code, lineno=None):
if not DEBUG or not code:
return ''
funcname = code.co_name
# TODO: Be cleverer about stripping filename,
# e.g. strip based on sys.path.
filename = os.path.basename(code.co_filename)
if lineno is None:
lineno = code.co_firstlineno
return '%s(%s:%s)' % (funcname, filename, lineno)
def positional(max_pos_args):
"""A decorator to declare that only the first N arguments may be positional.
Note that for methods, N includes 'self'.
"""
__ndb_debug__ = 'SKIP'
def positional_decorator(wrapped):
if not DEBUG:
return wrapped
__ndb_debug__ = 'SKIP'
@wrapping(wrapped)
def positional_wrapper(*args, **kwds):
__ndb_debug__ = 'SKIP'
if len(args) > max_pos_args:
plural_s = ''
if max_pos_args != 1:
plural_s = 's'
raise TypeError(
'%s() takes at most %d positional argument%s (%d given)' %
(wrapped.__name__, max_pos_args, plural_s, len(args)))
return wrapped(*args, **kwds)
return positional_wrapper
return positional_decorator
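# Hedged usage sketch (illustrative only): with @positional(1) only the first
# argument may be passed positionally; extra positional arguments raise
# TypeError when DEBUG is True. `fetch` is a hypothetical function.
@positional(1)
def fetch(kind, limit=20, keys_only=False):
  return kind, limit, keys_only
# fetch('Account', limit=10) is accepted; fetch('Account', 10) raises TypeError.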
def decorator(wrapped_decorator):
"""Converts a function into a decorator that optionally accepts keyword
arguments in its declaration.
Example usage:
@utils.decorator
def decorator(func, args, kwds, op1=None):
... apply op1 ...
return func(*args, **kwds)
# Form (1), vanilla
@decorator
def foo(...):
...
# Form (2), with options
@decorator(op1=5)
def foo(...):
...
Args:
wrapped_decorator: A function that accepts positional args (func, args,
kwds) and any additional supported keyword arguments.
Returns:
A decorator with an additional 'wrapped_decorator' property that is set to
the original function.
"""
def helper(_func=None, **options):
def outer_wrapper(func):
@wrapping(func)
def inner_wrapper(*args, **kwds):
return wrapped_decorator(func, args, kwds, **options)
return inner_wrapper
if _func is None:
# Form (2), with options.
return outer_wrapper
# Form (1), vanilla.
if options:
# Don't allow @decorator(foo, op1=5).
raise TypeError('positional arguments not supported')
return outer_wrapper(_func)
helper.wrapped_decorator = wrapped_decorator
return helper
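# Hedged usage sketch (illustrative only): a decorator built with the helper
# above that works both bare and with keyword options. `retry_on_error` and
# its `attempts` option are hypothetical names invented for this sketch.
@decorator
def retry_on_error(func, args, kwds, attempts=1):
  last_exc = None
  for _ in range(attempts):
    try:
      return func(*args, **kwds)
    except Exception as exc:
      last_exc = exc
  raise last_exc

@retry_on_error              # Form (1), vanilla
def ping():
  return 'pong'

@retry_on_error(attempts=3)  # Form (2), with options
def flaky():
  return 'ok'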
def tweak_logging():
# Hack for running tests with verbose logging. If there are two or
# more -v flags, turn on INFO logging; if there are 3 or more, DEBUG.
# (A single -v just tells unittest.main() to print the name of each
# test; we don't want to interfere with that.)
# Also, if there is a -q flag, set DEBUG to False, suppressing more
# debug info even from warnings.
q = 0
v = 0
for arg in sys.argv[1:]:
if arg.startswith('-v'):
v += arg.count('v')
if arg.startswith('-q'):
q += arg.count('q')
if v >= 2:
level = logging.INFO
if v >= 3:
level = logging.DEBUG - 1
logging.basicConfig(level=level)
if q > 0:
global DEBUG
DEBUG = False
if 'test' in os.path.basename(sys.argv[0]):
tweak_logging()
|
|
from django.core.serializers.json import json, DjangoJSONEncoder
from ..channel import Group, Channel
from ..auth import channel_session_user_from_http
from ..sessions import enforce_ordering
from .base import BaseConsumer
class WebsocketConsumer(BaseConsumer):
"""
Base WebSocket consumer. Provides a general encapsulation for the
WebSocket handling model that other applications can build on.
"""
# You shouldn't need to override this
method_mapping = {
"websocket.connect": "raw_connect",
"websocket.receive": "raw_receive",
"websocket.disconnect": "raw_disconnect",
}
# Turning this on passes the user over from the HTTP session on connect,
# implies channel_session_user
http_user = False
# Set to True if you want the class to enforce ordering for you
slight_ordering = False
strict_ordering = False
groups = None
def get_handler(self, message, **kwargs):
"""
Pulls out the path onto an instance variable, and optionally
adds the ordering decorator.
"""
# HTTP user implies channel session user
if self.http_user:
self.channel_session_user = True
# Get super-handler
self.path = message['path']
handler = super(WebsocketConsumer, self).get_handler(message, **kwargs)
# Optionally apply HTTP transfer
if self.http_user:
handler = channel_session_user_from_http(handler)
# Ordering decorators
if self.strict_ordering:
return enforce_ordering(handler, slight=False)
elif self.slight_ordering:
raise ValueError("Slight ordering is now always on. Please remove `slight_ordering=True`.")
else:
return handler
def connection_groups(self, **kwargs):
"""
Group(s) to make people join when they connect and leave when they
disconnect. Make sure to return a list/tuple, not a string!
"""
return self.groups or []
def raw_connect(self, message, **kwargs):
"""
Called when a WebSocket connection is opened. Base level so you don't
need to call super() all the time.
"""
for group in self.connection_groups(**kwargs):
Group(group, channel_layer=message.channel_layer).add(message.reply_channel)
self.connect(message, **kwargs)
def connect(self, message, **kwargs):
"""
Called when a WebSocket connection is opened.
"""
pass
def raw_receive(self, message, **kwargs):
"""
Called when a WebSocket frame is received. Decodes it and passes it
to receive().
"""
if "text" in message:
self.receive(text=message['text'], **kwargs)
else:
self.receive(bytes=message['bytes'], **kwargs)
def receive(self, text=None, bytes=None, **kwargs):
"""
Called with a decoded WebSocket frame.
"""
pass
def send(self, text=None, bytes=None, close=False):
"""
Sends a reply back down the WebSocket
"""
message = {}
if close:
message["close"] = True
if text is not None:
message["text"] = text
elif bytes is not None:
message["bytes"] = bytes
else:
raise ValueError("You must pass text or bytes")
self.message.reply_channel.send(message)
@classmethod
def group_send(cls, name, text=None, bytes=None, close=False):
message = {}
if close:
message["close"] = True
if text is not None:
message["text"] = text
elif bytes is not None:
message["bytes"] = bytes
else:
raise ValueError("You must pass text or bytes")
Group(name).send(message)
def close(self):
"""
Closes the WebSocket from the server end
"""
self.message.reply_channel.send({"close": True})
def raw_disconnect(self, message, **kwargs):
"""
Called when a WebSocket connection is closed. Base level so you don't
need to call super() all the time.
"""
for group in self.connection_groups(**kwargs):
Group(group, channel_layer=message.channel_layer).discard(message.reply_channel)
self.disconnect(message, **kwargs)
def disconnect(self, message, **kwargs):
"""
Called when a WebSocket connection is closed.
"""
pass
class JsonWebsocketConsumer(WebsocketConsumer):
"""
Variant of WebsocketConsumer that automatically JSON-encodes and decodes
messages as they come in and go out. Expects everything to be text; will
error on binary data.
"""
def raw_receive(self, message, **kwargs):
if "text" in message:
self.receive(json.loads(message['text']), **kwargs)
else:
raise ValueError("No text section for incoming WebSocket frame!")
def receive(self, content, **kwargs):
"""
Called with decoded JSON content.
"""
pass
def send(self, content, close=False):
"""
Encode the given content as JSON and send it to the client.
"""
super(JsonWebsocketConsumer, self).send(text=json.dumps(content), close=close)
@classmethod
def group_send(cls, name, content, close=False):
WebsocketConsumer.group_send(name, json.dumps(content), close=close)
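# Hedged usage sketch (not part of channels itself): a minimal consumer built
# on JsonWebsocketConsumer that joins one group and echoes decoded JSON
# payloads back to the sender. The group name "echo" is an assumption made
# only for this sketch.
class EchoConsumer(JsonWebsocketConsumer):

    def connection_groups(self, **kwargs):
        return ["echo"]

    def receive(self, content, **kwargs):
        # `content` is already decoded from JSON; send() re-encodes it.
        self.send(content)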
class WebsocketDemultiplexer(JsonWebsocketConsumer):
"""
JSON-understanding WebSocket consumer subclass that handles demultiplexing
streams using a "stream" key in a top-level dict and the actual payload
in a sub-dict called "payload". This lets you run multiple streams over
a single WebSocket connection in a standardised way.
Incoming messages on streams are mapped into a custom channel so you can
just tie in consumers the normal way. The reply_channels are kept so
sessions/auth continue to work. Payloads must be a dict at the top level,
so they fulfill the Channels message spec.
Set a mapping from streams to channels in the "mapping" key. We make you
whitelist channels like this to allow different namespaces and for security
reasons (imagine if someone could inject straight into websocket.receive).
"""
mapping = {}
def receive(self, content, **kwargs):
# Check the frame looks good
if isinstance(content, dict) and "stream" in content and "payload" in content:
# Match it to a channel
stream = content['stream']
if stream in self.mapping:
# Extract payload and add in reply_channel
payload = content['payload']
if not isinstance(payload, dict):
raise ValueError("Multiplexed frame payload is not a dict")
payload['reply_channel'] = self.message['reply_channel']
# Send it onto the new channel
Channel(self.mapping[stream]).send(payload)
else:
raise ValueError("Invalid multiplexed frame received (stream not mapped)")
else:
raise ValueError("Invalid multiplexed frame received (no channel/payload key)")
def send(self, stream, payload):
self.message.reply_channel.send(self.encode(stream, payload))
@classmethod
def group_send(cls, name, stream, payload, close=False):
message = cls.encode(stream, payload)
if close:
message["close"] = True
Group(name).send(message)
@classmethod
def encode(cls, stream, payload):
"""
Encodes stream + payload for outbound sending.
"""
return {"text": json.dumps({
"stream": stream,
"payload": payload,
}, cls=DjangoJSONEncoder)}
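# Hedged usage sketch (illustrative only): a demultiplexer that routes two
# streams onto custom channels; the stream and channel names below are
# assumptions made for this sketch, not defaults of the library.
class ExampleDemultiplexer(WebsocketDemultiplexer):
    mapping = {
        "intercom": "intercom.receive",
        "updates": "updates.receive",
    }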
|
|
"""
Record Arrays
=============
Record arrays expose the fields of structured arrays as properties.
Most commonly, ndarrays contain elements of a single type, e.g. floats, integers,
bools etc. However, it is possible for elements to be combinations of these,
such as::
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
>>> a
array([(1, 2.0), (1, 2.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Here, each element consists of two fields: x (an int), and y (a float).
This is known as a structured array. The different fields are analogous
to columns in a spreadsheet. The different fields can be accessed as
one would a dictionary::
>>> a['x']
array([1, 1])
>>> a['y']
array([ 2., 2.])
Record arrays allow us to access fields as properties::
>>> ar = a.view(np.recarray)
>>> ar.x
array([1, 1])
>>> ar.y
array([ 2., 2.])
"""
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']
import numeric as sb
from defchararray import chararray
import numerictypes as nt
import types
import os
import sys
ndarray = sb.ndarray
_byteorderconv = {'b':'>',
'l':'<',
'n':'=',
'B':'>',
'L':'<',
'N':'=',
'S':'s',
's':'s',
'>':'>',
'<':'<',
'=':'=',
'|':'|',
'I':'|',
'i':'|'}
# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
numfmt = nt.typeDict
_typestr = nt._typestr
def find_duplicate(list):
"""Find duplication in a list, return a list of duplicated elements"""
dup = []
for i in range(len(list)):
if (list[i] in list[i+1:]):
if (list[i] not in dup):
dup.append(list[i])
return dup
class format_parser:
"""Class to convert formats, names, titles description to a dtype
After constructing the format_parser object, the dtype attribute is
the converted data-type.
dtype = format_parser(formats, names, titles).dtype
Parameters
----------
formats : string or list
comma-separated format descriptions --- 'f8, i4, a5'
list of format description strings --- ['f8', 'i4', 'a5']
names : string or (list or tuple of strings)
comma-separated field names --- 'col1, col2, col3'
list or tuple of field names
titles : sequence
sequence of title strings or unicode
aligned : bool
align the fields by padding as the C-compiler would
byteorder :
If specified, all the fields will be changed to the
provided byteorder. Otherwise, the default byteorder is
used.
Returns
-------
object
A Python object whose dtype attribute is a data-type.
"""
def __init__(self, formats, names, titles, aligned=False, byteorder=None):
self._parseFormats(formats, aligned)
self._setfieldnames(names, titles)
self._createdescr(byteorder)
self.dtype = self._descr
def _parseFormats(self, formats, aligned=0):
""" Parse the field formats """
if formats is None:
raise ValueError, "Need formats argument"
if isinstance(formats, list):
if len(formats) < 2:
formats.append('')
formats = ','.join(formats)
dtype = sb.dtype(formats, aligned)
fields = dtype.fields
if fields is None:
dtype = sb.dtype([('f1', dtype)], aligned)
fields = dtype.fields
keys = dtype.names
self._f_formats = [fields[key][0] for key in keys]
self._offsets = [fields[key][1] for key in keys]
self._nfields = len(keys)
def _setfieldnames(self, names, titles):
"""convert input field names into a list and assign to the _names
attribute """
if (names):
if (type(names) in [types.ListType, types.TupleType]):
pass
elif (type(names) == types.StringType):
names = names.split(',')
else:
raise NameError, "illegal input names %s" % `names`
self._names = [n.strip() for n in names[:self._nfields]]
else:
self._names = []
# if the names are not specified, they will be assigned as
# "f0, f1, f2,..."
# if not enough names are specified, they will be assigned as "f[n],
# f[n+1],..." etc., where n is the number of specified names.
self._names += ['f%d' % i for i in range(len(self._names),
self._nfields)]
# check for redundant names
_dup = find_duplicate(self._names)
if _dup:
raise ValueError, "Duplicate field names: %s" % _dup
if (titles):
self._titles = [n.strip() for n in titles[:self._nfields]]
else:
self._titles = []
titles = []
if (self._nfields > len(titles)):
self._titles += [None]*(self._nfields-len(titles))
def _createdescr(self, byteorder):
descr = sb.dtype({'names':self._names,
'formats':self._f_formats,
'offsets':self._offsets,
'titles':self._titles})
if (byteorder is not None):
byteorder = _byteorderconv[byteorder[0]]
descr = descr.newbyteorder(byteorder)
self._descr = descr
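def _format_parser_example():
    # Hedged illustration (not part of the original module): build a dtype
    # from format strings, field names and no titles; on a little-endian
    # machine the result is dtype([('x', '<f8'), ('y', '<i4'), ('name', '|S5')]).
    return format_parser(['f8', 'i4', 'a5'], ['x', 'y', 'name'], None).dtype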
class record(nt.void):
"""A data-type scalar that allows field access as attribute lookup.
"""
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.item())
def __getattribute__(self, attr):
if attr in ['setfield', 'getfield', 'dtype']:
return nt.void.__getattribute__(self, attr)
try:
return nt.void.__getattribute__(self, attr)
except AttributeError:
pass
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
obj = self.getfield(*res[:2])
# if it has fields return a recarray,
# if it's a string ('SU') return a chararray
# otherwise return the object
try:
dt = obj.dtype
except AttributeError:
return obj
if dt.fields:
return obj.view(obj.__class__)
if dt.char in 'SU':
return obj.view(chararray)
return obj
else:
raise AttributeError, "'record' object has no "\
"attribute '%s'" % attr
def __setattr__(self, attr, val):
if attr in ['setfield', 'getfield', 'dtype']:
raise AttributeError, "Cannot set '%s' attribute" % attr
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
return self.setfield(val, *res[:2])
else:
if getattr(self,attr,None):
return nt.void.__setattr__(self, attr, val)
else:
raise AttributeError, "'record' object has no "\
"attribute '%s'" % attr
def pprint(self):
# pretty-print all fields
names = self.dtype.names
maxlen = max([len(name) for name in names])
rows = []
fmt = '%% %ds: %%s' %maxlen
for name in names:
rows.append(fmt%(name, getattr(self, name)))
return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already) The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
"""
Construct an ndarray that allows field access using attributes.
Arrays may have a data-type containing fields, analogous
to columns in a spreadsheet. An example is ``[(x, int), (y, float)]``,
where each entry in the array is a pair of ``(int, float)``. Normally,
these attributes are accessed using dictionary lookups such as ``arr['x']``
and ``arr['y']``. Record arrays allow the fields to be accessed as members
of the array, using ``arr.x`` and ``arr.y``.
Parameters
----------
shape : tuple
Shape of output array.
dtype : data-type, optional
The desired data-type. By default, the data-type is determined
from `formats`, `names`, `titles`, `aligned` and `byteorder`.
formats : list of data-types, optional
A list containing the data-types for the different columns, e.g.
``['i4', 'f8', 'i4']``. `formats` does *not* support the new
convention of using types directly, i.e. ``(int, float, int)``.
Note that `formats` must be a list, not a tuple.
Given that `formats` is somewhat limited, we recommend specifying
`dtype` instead.
names : tuple of strings, optional
The name of each column, e.g. ``('x', 'y', 'z')``.
buf : buffer, optional
By default, a new array is created of the given shape and data-type.
If `buf` is specified and is an object exposing the buffer interface,
the array will use the memory from the existing buffer. In this case,
the `offset` and `strides` keywords are available.
Other Parameters
----------------
titles : tuple of strings, optional
Aliases for column names. For example, if `names` were
``('x', 'y', 'z')`` and `titles` is
``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
byteorder : {'<', '>', '='}, optional
Byte-order for all fields.
aligned : {True, False}, optional
Align the fields in memory as the C-compiler would.
strides : tuple of ints, optional
Buffer (`buf`) is interpreted according to these strides (strides
define how many bytes each array element, row, column, etc.
occupy in memory).
offset : int, optional
Start reading buffer (`buf`) from this offset onwards.
Returns
-------
rec : recarray
Empty array of the given shape and type.
See Also
--------
rec.fromrecords : Construct a record array from data.
record : fundamental data-type for recarray
format_parser : determine a data-type from formats, names, titles
Notes
-----
This constructor can be compared to ``empty``: it creates a new record
array but does not fill it with data. To create a record array from data,
use one of the following methods:
1. Create a standard ndarray and convert it to a record array,
using ``arr.view(np.recarray)``
2. Use the `buf` keyword.
3. Use `np.rec.fromrecords`.
Examples
--------
Create an array with two fields, ``x`` and ``y``:
>>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
>>> x
array([(1.0, 2), (3.0, 4)],
dtype=[('x', '<f8'), ('y', '<i4')])
>>> x['x']
array([ 1., 3.])
View the array as a record array:
>>> x = x.view(np.recarray)
>>> x.x
array([ 1., 3.])
>>> x.y
array([2, 4])
Create a new, empty record array:
>>> np.recarray((2,),
... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
(3471280, 1.2134086255804012e-316, 0)],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
"""
def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False):
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
if buf is None:
self = ndarray.__new__(subtype, shape, (record, descr))
else:
self = ndarray.__new__(subtype, shape, (record, descr),
buffer=buf, offset=offset,
strides=strides)
return self
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError: # attr must be a fieldname
pass
fielddict = ndarray.__getattribute__(self,'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError, "record array has no attribute %s" % attr
obj = self.getfield(*res)
# if it has fields return a recarray, otherwise return
# normal array
if obj.dtype.fields:
return obj
if obj.dtype.char in 'SU':
return obj.view(chararray)
return obj.view(ndarray)
# Save the dictionary
# If the attr is a field name and not in the saved dictionary
# Undo any "setting" of the attribute and do a setfield
# Thus, you can't create attributes on-the-fly that are field names.
def __setattr__(self, attr, val):
newattr = attr not in self.__dict__
try:
ret = object.__setattr__(self, attr, val)
except:
fielddict = ndarray.__getattribute__(self,'dtype').fields or {}
if attr not in fielddict:
exctype, value = sys.exc_info()[:2]
raise exctype, value
else:
fielddict = ndarray.__getattribute__(self,'dtype').fields or {}
if attr not in fielddict:
return ret
if newattr: # We just added this one
try: # or this setattr worked on an internal
# attribute.
object.__delattr__(self, attr)
except:
return ret
try:
res = fielddict[attr][:2]
except (TypeError,KeyError):
raise AttributeError, "record array has no attribute %s" % attr
return self.setfield(val, *res)
def __getitem__(self, indx):
obj = ndarray.__getitem__(self, indx)
if (isinstance(obj, ndarray) and obj.dtype.isbuiltin):
return obj.view(ndarray)
return obj
def __repr__(self) :
ret = ndarray.__repr__(self)
return ret.replace("recarray", "rec.array", 1)
def field(self, attr, val=None):
if isinstance(attr, int):
names = ndarray.__getattribute__(self,'dtype').names
attr = names[attr]
fielddict = ndarray.__getattribute__(self,'dtype').fields
res = fielddict[attr][:2]
if val is None:
obj = self.getfield(*res)
if obj.dtype.fields:
return obj
if obj.dtype.char in 'SU':
return obj.view(chararray)
return obj.view(ndarray)
else:
return self.setfield(val, *res)
def view(self, obj):
try:
if issubclass(obj, ndarray):
return ndarray.view(self, obj)
except TypeError:
pass
dtype = sb.dtype(obj)
if dtype.fields is None:
return self.__array__().view(dtype)
return ndarray.view(self, obj)
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a record array from a (flat) list of arrays
>>> x1=np.array([1,2,3,4])
>>> x2=np.array(['a','dd','xyz','12'])
>>> x3=np.array([1.1,2,3,4])
>>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
>>> print r[1]
(2, 'dd', 2.0)
>>> x1[1]=34
>>> r.a
array([1, 2, 3, 4])
"""
arrayList = [sb.asarray(x) for x in arrayList]
if shape is None or shape == 0:
shape = arrayList[0].shape
if isinstance(shape, int):
shape = (shape,)
if formats is None and dtype is None:
# go through each object in the list to see if it is an ndarray
# and determine the formats.
formats = ''
for obj in arrayList:
if not isinstance(obj, ndarray):
raise ValueError, "item in the array list must be an ndarray."
formats += _typestr[obj.dtype.type]
if issubclass(obj.dtype.type, nt.flexible):
formats += `obj.itemsize`
formats += ','
formats = formats[:-1]
if dtype is not None:
descr = sb.dtype(dtype)
_names = descr.names
else:
parsed = format_parser(formats, names, titles, aligned, byteorder)
_names = parsed._names
descr = parsed._descr
# Determine shape from data-type.
if len(descr) != len(arrayList):
raise ValueError, "mismatch between the number of fields "\
"and the number of arrays"
d0 = descr[0].shape
nn = len(d0)
if nn > 0:
shape = shape[:-nn]
for k, obj in enumerate(arrayList):
nn = len(descr[k].shape)
testshape = obj.shape[:len(obj.shape)-nn]
if testshape != shape:
raise ValueError, "array-shape mismatch in array %d" % k
_array = recarray(shape, descr)
# populate the record array (makes a copy)
for i in range(len(arrayList)):
_array[_names[i]] = arrayList[i]
return _array
# shape must be 1-d if you use list of lists...
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None):
""" create a recarray from a list of records in text form
The data in the same field can be heterogeneous; they will be promoted
to the highest data type. This method is intended for creating
smaller record arrays. If used to create a large array without formats
defined, e.g.
r=fromrecords([(2,3.,'abc')]*100000)
it can be slow.
If formats is None, then this will auto-detect formats. Use list of
tuples rather than list of lists for faster processing.
>>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
... names='col1,col2,col3')
>>> print r[0]
(456, 'dbe', 1.2)
>>> r.col1
array([456, 2])
>>> r.col2
chararray(['dbe', 'de'],
dtype='|S3')
>>> import cPickle
>>> print cPickle.loads(cPickle.dumps(r))
[(456, 'dbe', 1.2) (2, 'de', 1.3)]
"""
nfields = len(recList[0])
if formats is None and dtype is None: # slower
obj = sb.array(recList, dtype=object)
arrlist = [sb.array(obj[...,i].tolist()) for i in xrange(nfields)]
return fromarrays(arrlist, formats=formats, shape=shape, names=names,
titles=titles, aligned=aligned, byteorder=byteorder)
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
try:
retval = sb.array(recList, dtype = descr)
except TypeError: # list of lists instead of list of tuples
if (shape is None or shape == 0):
shape = len(recList)
if isinstance(shape, (int, long)):
shape = (shape,)
if len(shape) > 1:
raise ValueError, "Can only deal with 1-d array."
_array = recarray(shape, descr)
for k in xrange(_array.size):
_array[k] = tuple(recList[k])
return _array
else:
if shape is not None and retval.shape != shape:
retval.shape = shape
res = retval.view(recarray)
res.dtype = sb.dtype((record, res.dtype))
return res
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a (read-only) record array from binary data contained in
a string"""
if dtype is None and formats is None:
raise ValueError, "Must have dtype= or formats="
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
itemsize = descr.itemsize
if (shape is None or shape == 0 or shape == -1):
shape = (len(datastring)-offset) / itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
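def _fromstring_example():
    # Hedged illustration (not part of the original module): rebuild a
    # two-field record array from packed little-endian binary data; the
    # struct format '<idid' mirrors the unaligned 'i4,f8' record layout.
    import struct
    buf = struct.pack('<idid', 1, 2.0, 3, 4.0)
    r = fromstring(buf, formats='i4,f8', byteorder='<')
    return r.f0  # -> array([1, 3])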
def get_remaining_size(fd):
try:
fn = fd.fileno()
except AttributeError:
return os.path.getsize(fd.name) - fd.tell()
st = os.fstat(fn)
size = st.st_size - fd.tell()
return size
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create an array from binary file data
If fd is a string then that file is opened, else it is assumed
to be a file object.
>>> from tempfile import TemporaryFile
>>> a = np.empty(10,dtype='f8,i4,a5')
>>> a[5] = (0.5,10,'abcde')
>>>
>>> fd=TemporaryFile()
>>> a = a.newbyteorder('<')
>>> a.tofile(fd)
>>>
>>> fd.seek(0)
>>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
... byteorder='<')
>>> print r[5]
(0.5, 10, 'abcde')
>>> r.shape
(10,)
"""
if (shape is None or shape == 0):
shape = (-1,)
elif isinstance(shape, (int, long)):
shape = (shape,)
name = 0
if isinstance(fd, str):
name = 1
fd = open(fd, 'rb')
if (offset > 0):
fd.seek(offset, 1)
size = get_remaining_size(fd)
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
itemsize = descr.itemsize
shapeprod = sb.array(shape).prod()
shapesize = shapeprod*itemsize
if shapesize < 0:
shape = list(shape)
shape[ shape.index(-1) ] = size / -shapesize
shape = tuple(shape)
shapeprod = sb.array(shape).prod()
nbytes = shapeprod*itemsize
if nbytes > size:
raise ValueError(
"Not enough bytes left in file for specified shape and type")
# create the array
_array = recarray(shape, descr)
nbytesread = fd.readinto(_array.data)
if nbytesread != nbytes:
raise IOError("Didn't read as many bytes as expected")
if name:
fd.close()
return _array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None, copy=True):
"""Construct a record array from a wide-variety of objects.
"""
if isinstance(obj, (type(None), str, file)) and (formats is None) \
and (dtype is None):
raise ValueError("Must define formats (or dtype) if object is "\
"None, string, or an open file")
kwds = {}
if dtype is not None:
dtype = sb.dtype(dtype)
elif formats is not None:
dtype = format_parser(formats, names, titles,
aligned, byteorder)._descr
else:
kwds = {'formats': formats,
'names' : names,
'titles' : titles,
'aligned' : aligned,
'byteorder' : byteorder
}
if obj is None:
if shape is None:
raise ValueError("Must define a shape if obj is None")
return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
elif isinstance(obj, str):
return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
elif isinstance(obj, (list, tuple)):
if isinstance(obj[0], (tuple, list)):
return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
else:
return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
elif isinstance(obj, recarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new
elif isinstance(obj, file):
return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
elif isinstance(obj, ndarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
res = new.view(recarray)
if issubclass(res.dtype.type, nt.void):
res.dtype = sb.dtype((record, res.dtype))
return res
else:
interface = getattr(obj, "__array_interface__", None)
if interface is None or not isinstance(interface, dict):
raise ValueError("Unknown input type")
obj = sb.array(obj)
if dtype is not None and (obj.dtype != dtype):
obj = obj.view(dtype)
res = obj.view(recarray)
if issubclass(res.dtype.type, nt.void):
res.dtype = sb.dtype((record, res.dtype))
return res
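def _rec_array_example():
    # Hedged illustration (not part of the original module): the generic
    # array() constructor dispatches on the input type; a list of tuples is
    # handled by fromrecords(). Field names and formats here are arbitrary.
    r = array([(1, 'abc', 2.0), (2, 'de', 3.5)],
              formats='i4,a3,f8', names='id,code,value')
    return r.code  # -> chararray(['abc', 'de'], dtype='|S3')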
|
|
# Copyright: Copyright 2011-2021 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
Two TestCase classes for writing tests to be run using Django's test
runner (i.e. `manage.py test`).
"""
import logging ; logger = logging.getLogger(__name__)
import sys
import json
import unittest
from django.conf import settings
from django.test import TestCase as DjangoTestCase, TransactionTestCase
from django.core.management import call_command
# from django.test import Client
from django.db import connection, reset_queries, connections, DEFAULT_DB_ALIAS
from django.utils import translation
from lino.utils import AttrDict
from lino.core.signals import testcase_setup # , database_ready
from lino.core.callbacks import applyCallbackChoice
from .test import CommonTestCase
class DjangoManageTestCase(DjangoTestCase, CommonTestCase):
"""
Adds some extensions to the Django TestCase.
"""
longMessage = True # see unittest. used for check_json_result
override_djangosite_settings = dict()
"""If specified, this is a dict of :class:`Site <lino.core.site.Site>`
attributes to override before running the test.
"""
defining_module = None
"""When you decorate your subclass of TestCase, you must also specify::
defining_module = __name__
This is because a decorator will change your class's `__module__`
attribute, and :meth:`test_them_all` would then search for test methods
in the wrong module.
"""
def __call__(self, *args, **kw):
"""Does some initialization and sends the :attr:`testcase_setup
<lino.utils.testcase_setup>` signal, then calls super.
"""
if self.override_djangosite_settings:
settings.SITE.override_defaults(
**self.override_djangosite_settings)
# Make sure that every test runs with the same language.
# Without this it is possible that some other language may
# have been activated by previous tests:
if settings.LANGUAGE_CODE:
translation.activate(settings.LANGUAGE_CODE)
testcase_setup.send(self)
return super(DjangoManageTestCase, self).__call__(*args, **kw)
# def tearDown(self):
# super(DjangoManageTestCase, self).tearDown()
#
# def setUp(self):
# super(DjangoManageTestCase, self).setUp()
# # 20151203 database_ready.send(self)
def check_sql_queries(self, *expected):
"""Checks whether the specified expected SQL queries match to those
who actually have been emitted.
"""
for i, x1 in enumerate(expected):
if len(connection.queries) <= i:
self.fail("SQL %d expected %s, found nothing" % (i, x1))
sql = connection.queries[i]['sql'].strip()
x2 = x1.split('[...]')
if len(x2) == 2:
s = x2.pop().strip()
if not sql.endswith(s):
self.fail("SQL %d doesn't end with %s:---\n%s\n---" %
(i, s, sql))
self.assertEqual(len(x2), 1)
s = x2[0].strip()
if not sql.startswith(s):
self.fail("SQL %d doesn't start with %s:---\n%s\n---" %
(i, s, sql))
if len(expected) < len(connection.queries):
for q in connection.queries[len(expected):]:
logger.warning("Unexpected SQL:---\n%s\n---", q['sql'])
self.fail("Found unexpected SQL")
reset_queries()
def get_json_dict(self, *args, **kwargs):
return self.client_json_dict(self.client.get, *args, **kwargs)
def post_json_dict(self, *args, **kwargs):
return self.client_json_dict(self.client.post, *args, **kwargs)
def put_json_dict(self, *args, **kwargs):
return self.client_json_dict(self.client.put, *args, **kwargs)
def client_json_dict(self, meth, username, url, *data, **extra):
"""
Send a GET, POST or PUT request to the test client with the given
username, url and data. The server is expected to respond with a
JSON-encoded response. Parse the response's content (which is expected
to contain a dict) and convert this dict to an AttrDict before returning it.
"""
ar = settings.SITE.login(username)
self.client.force_login(ar.user)
extra[settings.SITE.remote_user_header] = username
# extra.update(REMOTE_USER=username)
res = meth(url, *data, **extra)
if res.status_code != 200:
raise Exception("{} gave status code {} instead of 200".format(
url, res.status_code))
content = res.content.decode()
try:
d = json.loads(content)
except ValueError as e:
raise ValueError("Invalid JSON {} : {}".format(content, e))
return AttrDict(d)
@unittest.skip("Broken. I guess this has come because callbacks are now called again from the beginning....")
def check_callback_dialog(self, meth, username, url, dialog, *data, **extra):
"""Check wether the given dialog runs as expected and return the final
response as an `AttrDict`.
- `meth` : should be `self.client.get` or `self.client.post`
- `username` : the username
- `url` : the url
- `dialog` : a list of `(expected, reply)` tuples where
`expected` is the expected response message and `reply` must
be one of `'yes'` or `'no'` for all items except the
last item, where it must be None.
- `data` : optional positional arguments to the `meth`
- `extra` : optional keyword arguments to the `meth`
"""
result = self.client_json_dict(meth, username, url, *data, **extra)
for expected, answer in dialog:
self.assertEquivalent(expected, result.message)
if answer is None:
return result
cb = result.xcallback
self.assertEqual(cb['title'], "Confirmation")
self.assertEqual(cb['buttons'],
[[x, x.title()] for x in ["yes", "no"]])
print("20210215", extra)
applyCallbackChoice(result, *data, choice="yes")
# print(extra)
result = self.client_json_dict(meth, username, url, *data, **extra)
self.assertEqual(result.success, True)
raise Exception("last item of dialog must have answer None")
class RemoteAuthTestCase(DjangoManageTestCase):
"""
Base class for tests that use remote http authentication. We
override the :meth:`__call__` method in order to simulate
`remote_user_header <lino.core.site.Site.remote_user_header>`
being set to ``'REMOTE_USER'``.
"""
def __call__(self, *args, **kw):
settings.SITE.override_defaults(remote_user_header='REMOTE_USER')
mysettings = dict()
for k in ('MIDDLEWARE', 'AUTHENTICATION_BACKENDS'):
mysettings[k] = settings.SITE.django_settings.get(k)
with self.settings(**mysettings):
return super(RemoteAuthTestCase, self).__call__(*args, **kw)
TestCase = RemoteAuthTestCase
class NoAuthTestCase(DjangoManageTestCase):
def __call__(self, *args, **kw):
# These tests run without remote HTTP authentication, so we override
# __call__() to simulate `remote_user_header` being None.
settings.SITE.override_defaults(remote_user_header=None)
mysettings = dict()
for k in ('MIDDLEWARE',):
mysettings[k] = settings.SITE.django_settings.get(k)
with self.settings(**mysettings):
return super(NoAuthTestCase, self).__call__(*args, **kw)
class RestoreTestCase(TransactionTestCase):
"""
Used for testing migrations from previous versions.
See :doc:`/dev/migtests`.
"""
tested_versions = []
"""
A list of strings, each of which is a version for which there must
be a migration dump created by :manage:`makemigdump`.
"""
def test_restore(self):
conn = connections[DEFAULT_DB_ALIAS]
cursor = conn.cursor()
cursor.execute('PRAGMA foreign_keys = OFF')
enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0]
for v in self.tested_versions:
run_args = ["tests/dumps/{}/restore.py".format(v),
"--noinput"]
sys.argv = ["manage.py", "run"] + run_args
call_command("run", *run_args)
|
|
import os
from gi.repository import Gtk
class ConfigWindow:
def __init__(self, main_window):
self.gtkwin = Gtk.Window()
self.gtkwin.set_position(Gtk.WindowPosition.CENTER)
self.gtkwin.connect("destroy", self.on_destroy)
self.gtkwin.set_size_request(640, 320)
self.gtkwin.set_title("Kismon Preferences")
self.main_window = main_window
self.config = main_window.config
self.map = main_window.map
self.notebook = Gtk.Notebook()
self.gtkwin.add(self.notebook)
general_page = Gtk.Table(n_rows=2, n_columns=1)
self.notebook.append_page(general_page)
self.notebook.set_tab_label_text(general_page, "General")
self.init_general_page(general_page)
map_page = Gtk.Table(n_rows=2, n_columns=1)
self.notebook.append_page(map_page)
self.notebook.set_tab_label_text(map_page, "Map")
if self.map is None:
label = Gtk.Label(label="Map disabled")
map_page.attach(label, 0, 1, 0, 1, yoptions=Gtk.AttachOptions.SHRINK)
else:
self.init_map_page(map_page)
self.gtkwin.show_all()
def init_general_page(self, page):
frame = Gtk.Frame()
frame.set_label("Log List")
page.attach(frame, 0, 1, 0, 1, yoptions=Gtk.AttachOptions.SHRINK)
vbox = Gtk.VBox()
frame.add(vbox)
hbox = Gtk.HBox()
vbox.add(hbox)
label = Gtk.Label(label="Max rows in the log list: ")
label.set_alignment(xalign=0, yalign=0.5)
label.set_justify(Gtk.Justification.RIGHT)
hbox.pack_start(label, False, False, 5)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(5)
field.set_increments(1,100)
field.set_range(-1,99999)
field.set_value(self.config["window"]["log_list_max"])
field.connect("output", self.on_change_log_list_max)
hbox.pack_start(field, False, False, 5)
label = Gtk.Label(label="-1 = unlimited 0 = disable")
label.set_alignment(xalign=0, yalign=0.5)
hbox.pack_start(label, False, False, 5)
frame = Gtk.Frame()
frame.set_label("Autosave")
page.attach(frame, 0, 1, 1, 2, yoptions=Gtk.AttachOptions.SHRINK)
vbox = Gtk.VBox()
frame.add(vbox)
hbox = Gtk.HBox()
vbox.add(hbox)
label = Gtk.Label(label="Save the networks every (in minutes):")
hbox.pack_start(label, False, False, 5)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(5)
field.set_increments(1,100)
field.set_range(0,99999)
field.set_value(self.config["networks"]["autosave"])
field.connect("output", self.on_change_autosave)
hbox.pack_start(field, False, False, 5)
label = Gtk.Label(label="0 = disable")
label.set_alignment(xalign=0, yalign=0.5)
hbox.pack_start(label, False, False, 5)
def on_change_log_list_max(self, widget):
if self.config["window"]["log_list_max"] == int(widget.get_value()):
return
self.config["window"]["log_list_max"] = int(widget.get_value())
self.main_window.log_list.cleanup(0)
def on_change_autosave(self, widget):
if self.config["networks"]["autosave"] == int(widget.get_value()):
return
self.config["networks"]["autosave"] = int(widget.get_value())
self.main_window.networks.set_autosave(self.config["networks"]["autosave"])
def init_map_page(self, map_page):
position_frame = Gtk.Frame()
position_frame.set_label("Position")
map_page.attach(position_frame, 0, 1, 0, 1, yoptions=Gtk.AttachOptions.SHRINK)
position_vbox = Gtk.VBox()
position_frame.add(position_vbox)
map_widget = Gtk.RadioButton(group=None, label='In main window (default)')
if self.config["window"]["map_position"] == "widget":
map_widget.clicked()
map_widget.connect("clicked", self.main_window.on_map_widget)
position_vbox.add(map_widget)
map_window = Gtk.RadioButton(group=map_widget, label='In separate window')
if self.config["window"]["map_position"] == "window":
map_window.clicked()
map_window.connect("clicked", self.main_window.on_map_window)
position_vbox.add(map_window)
map_hide = Gtk.RadioButton(group=map_widget, label='Hide')
if self.config["window"]["map_position"] == "hide":
map_hide.clicked()
map_hide.connect("clicked", self.main_window.on_map_hide)
position_vbox.add(map_hide)
source_frame = Gtk.Frame()
source_frame.set_label("Source")
source_vbox = Gtk.VBox()
source_frame.add(source_vbox)
map_page.attach(source_frame, 0, 1, 1, 2, yoptions=Gtk.AttachOptions.SHRINK)
first = None
for name, source in (("Openstreetmap (default)", "openstreetmap"),
("Openstreetmap Renderer", "openstreetmap-renderer"),
("Custom tile source", "custom")):
map_source = Gtk.RadioButton(group=first, label=name)
if first is None:
first = map_source
if self.config["map"]["source"] == source:
map_source.clicked()
map_source.connect("clicked", self.on_map_source, source)
source_vbox.add(map_source)
hbox = Gtk.HBox()
source_vbox.add(hbox)
label = Gtk.Label(label=" URL: ")
label.set_alignment(xalign=0, yalign=0.5)
label.set_justify(Gtk.Justification.LEFT)
hbox.pack_start(label, False, False, 5)
entry = Gtk.Entry()
entry.set_width_chars(50)
entry.set_text(self.config["map"]["custom_source_url"])
entry.connect("changed", self.on_change_map_source_custom_url)
hbox.pack_start(entry, False, False, 5)
hbox = Gtk.HBox()
source_vbox.add(hbox)
x=1
for name in (" Zoom Levels: ", " - "):
label = Gtk.Label(label=name)
label.set_alignment(xalign=0, yalign=0.5)
label.set_justify(Gtk.Justification.LEFT)
hbox.pack_start(label, False, False, 5)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(5)
field.set_increments(1,3)
field.set_range(1,18)
if x == 1:
name = "custom_source_min"
else:
name = "custom_source_max"
field.set_value(self.config["map"][name])
field.connect("output", self.on_change_map_source_custom_zoom, name)
hbox.pack_start(field, False, False, 5)
x += 1
apply_button = Gtk.Button.new_with_mnemonic('_Apply')
apply_button.connect("clicked", self.on_map_source, "custom")
hbox.pack_start(apply_button, False, False, 5)
perf_frame = Gtk.Frame()
perf_frame.set_label("Performance")
perf_vbox = Gtk.VBox()
perf_frame.add(perf_vbox)
map_page.attach(perf_frame, 0, 1, 4, 5, yoptions=Gtk.AttachOptions.SHRINK)
perf_marker_positions = Gtk.CheckButton.new_with_label("Update marker positions")
if self.config["map"]["update_marker_positions"] is True:
perf_marker_positions.clicked()
perf_marker_positions.connect("clicked", self.on_update_marker_positions)
perf_vbox.add(perf_marker_positions)
def on_destroy(self, window):
self.gtkwin = None
def on_map_source(self, widget, source):
if (type(widget) == Gtk.RadioButton and widget.get_active()) or type(widget) == Gtk.Button:
self.map.set_source(source)
if self.config["window"]["map_position"] == "widget":
self.main_window.on_map_widget(None, True)
elif self.config["window"]["map_position"] == "window":
self.main_window.map_window.gtkwin.add(self.main_window.map.widget)
self.main_window.map_window.gtkwin.show_all()
def on_change_map_source_custom_url(self, widget):
self.config["map"]["custom_source_url"] = widget.get_text()
def on_change_map_source_custom_zoom(self, widget, name):
self.config["map"][name] = int(widget.get_value())
def on_update_marker_positions(self, widget):
self.config["map"]["update_marker_positions"] = widget.get_active()
|
|
import os
import json
import logging
import select
import psycopg2
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
types_map = {
20: TYPE_INTEGER,
21: TYPE_INTEGER,
23: TYPE_INTEGER,
700: TYPE_FLOAT,
1700: TYPE_FLOAT,
701: TYPE_FLOAT,
16: TYPE_BOOLEAN,
1082: TYPE_DATE,
1114: TYPE_DATETIME,
1184: TYPE_DATETIME,
1014: TYPE_STRING,
1015: TYPE_STRING,
1008: TYPE_STRING,
1009: TYPE_STRING,
2951: TYPE_STRING
}
def _wait(conn, timeout=None):
while 1:
try:
state = conn.poll()
if state == psycopg2.extensions.POLL_OK:
break
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [conn.fileno()], [], timeout)
elif state == psycopg2.extensions.POLL_READ:
select.select([conn.fileno()], [], [], timeout)
else:
raise psycopg2.OperationalError("poll() returned %s" % state)
except select.error:
raise psycopg2.OperationalError("select.error received")
class PostgreSQL(BaseSQLQueryRunner):
noop_query = "SELECT 1"
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"user": {
"type": "string"
},
"password": {
"type": "string"
},
"host": {
"type": "string",
"default": "127.0.0.1"
},
"port": {
"type": "number",
"default": 5432
},
"dbname": {
"type": "string",
"title": "Database Name"
},
"sslmode": {
"type": "string",
"title": "SSL Mode",
"default": "prefer"
}
},
"order": ['host', 'port', 'user', 'password'],
"required": ["dbname"],
"secret": ["password"]
}
@classmethod
def type(cls):
return "pg"
def _get_definitions(self, schema, query):
results, error = self.run_query(query, None)
if error is not None:
raise Exception("Failed getting schema.")
results = json.loads(results)
for row in results['rows']:
if row['table_schema'] != 'public':
table_name = '{}.{}'.format(row['table_schema'], row['table_name'])
else:
table_name = row['table_name']
if table_name not in schema:
schema[table_name] = {'name': table_name, 'columns': []}
schema[table_name]['columns'].append(row['column_name'])
def _get_tables(self, schema):
'''
relkind constants per https://www.postgresql.org/docs/10/static/catalog-pg-class.html
r = regular table
v = view
m = materialized view
f = foreign table
p = partitioned table (new in 10)
---
i = index
S = sequence
t = TOAST table
c = composite type
'''
query = """
SELECT s.nspname as table_schema,
c.relname as table_name,
a.attname as column_name
FROM pg_class c
JOIN pg_namespace s
ON c.relnamespace = s.oid
AND s.nspname NOT IN ('pg_catalog', 'information_schema')
JOIN pg_attribute a
ON a.attrelid = c.oid
AND a.attnum > 0
AND NOT a.attisdropped
WHERE c.relkind IN ('r', 'v', 'm', 'f', 'p')
"""
self._get_definitions(schema, query)
return schema.values()
def _get_connection(self):
connection = psycopg2.connect(user=self.configuration.get('user'),
password=self.configuration.get('password'),
host=self.configuration.get('host'),
port=self.configuration.get('port'),
dbname=self.configuration.get('dbname'),
sslmode=self.configuration.get('sslmode'),
async_=True)
return connection
def run_query(self, query, user):
connection = self._get_connection()
_wait(connection, timeout=10)
cursor = connection.cursor()
try:
cursor.execute(query)
_wait(connection)
if cursor.description is not None:
columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor]
data = {'columns': columns, 'rows': rows}
error = None
json_data = json.dumps(data, cls=JSONEncoder)
else:
error = 'Query completed but it returned no data.'
json_data = None
except (select.error, OSError) as e:
error = "Query interrupted. Please retry."
json_data = None
except psycopg2.DatabaseError as e:
error = e.message
json_data = None
except (KeyboardInterrupt, InterruptException):
connection.cancel()
error = "Query cancelled by user."
json_data = None
finally:
connection.close()
return json_data, error
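# Hedged usage sketch (illustrative only): exercising the runner directly,
# assuming the query-runner constructor accepts the configuration mapping used
# by self.configuration.get() above. All connection values are placeholders.
def _example_run_query():
    runner = PostgreSQL({"host": "127.0.0.1", "port": 5432, "user": "redash",
                         "password": "secret", "dbname": "postgres"})
    json_data, error = runner.run_query("SELECT 1 AS answer", None)
    return error if error else json.loads(json_data)["rows"]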
class Redshift(PostgreSQL):
@classmethod
def type(cls):
return "redshift"
def _get_connection(self):
sslrootcert_path = os.path.join(os.path.dirname(__file__), './files/redshift-ca-bundle.crt')
connection = psycopg2.connect(user=self.configuration.get('user'),
password=self.configuration.get('password'),
host=self.configuration.get('host'),
port=self.configuration.get('port'),
dbname=self.configuration.get('dbname'),
sslmode=self.configuration.get('sslmode', 'prefer'),
sslrootcert=sslrootcert_path,
async_=True)
return connection
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"user": {
"type": "string"
},
"password": {
"type": "string"
},
"host": {
"type": "string"
},
"port": {
"type": "number"
},
"dbname": {
"type": "string",
"title": "Database Name"
},
"sslmode": {
"type": "string",
"title": "SSL Mode",
"default": "prefer"
}
},
"order": ['host', 'port', 'user', 'password'],
"required": ["dbname", "user", "password", "host", "port"],
"secret": ["password"]
}
def _get_tables(self, schema):
# Use svv_columns to include internal & external (Spectrum) tables and views data for Redshift
# http://docs.aws.amazon.com/redshift/latest/dg/r_SVV_COLUMNS.html
# Use PG_GET_LATE_BINDING_VIEW_COLS to include schema for late binding views data for Redshift
# http://docs.aws.amazon.com/redshift/latest/dg/PG_GET_LATE_BINDING_VIEW_COLS.html
query = """
WITH tables AS (
SELECT DISTINCT table_name,
table_schema,
column_name,
ordinal_position AS pos
FROM svv_columns
WHERE table_schema NOT IN ('pg_internal','pg_catalog','information_schema')
UNION ALL
SELECT DISTINCT view_name::varchar AS table_name,
view_schema::varchar AS table_schema,
col_name::varchar AS column_name,
col_num AS pos
FROM pg_get_late_binding_view_cols()
cols(view_schema name, view_name name, col_name name, col_type varchar, col_num int)
)
SELECT table_name, table_schema, column_name
FROM tables
ORDER BY table_name, pos
"""
self._get_definitions(schema, query)
return schema.values()
class CockroachDB(PostgreSQL):
@classmethod
def type(cls):
return "cockroach"
register(PostgreSQL)
register(Redshift)
register(CockroachDB)
|
|
# -*- coding: utf-8 -*
from django.utils.translation import ugettext as _
COUNTRIES = (
('AF', 'AFG', '004', _('Afghanistan')),
('AX', 'ALA', '248', _('Aland Islands')),
('AL', 'ALB', '008', _('Albania')),
('DZ', 'DZA', '012', _('Algeria')),
('AS', 'ASM', '016', _('American Samoa')),
('AD', 'AND', '020', _('Andorra')),
('AO', 'AGO', '024', _('Angola')),
('AI', 'AIA', '660', _('Anguilla')),
('AQ', 'ATA', '010', _('Antarctica')),
('AG', 'ATG', '028', _('Antigua and Barbuda')),
('AR', 'ARG', '032', _('Argentina')),
('AM', 'ARM', '051', _('Armenia')),
('AW', 'ABW', '533', _('Aruba')),
('AU', 'AUS', '036', _('Australia')),
('AT', 'AUT', '040', _('Austria')),
('AZ', 'AZE', '031', _('Azerbaijan')),
('BS', 'BHS', '044', _('the Bahamas')),
('BH', 'BHR', '048', _('Bahrain')),
('BD', 'BGD', '050', _('Bangladesh')),
('BB', 'BRB', '052', _('Barbados')),
('BY', 'BLR', '112', _('Belarus')),
('BE', 'BEL', '056', _('Belgium')),
('BZ', 'BLZ', '084', _('Belize')),
('BJ', 'BEN', '204', _('Benin')),
('BM', 'BMU', '060', _('Bermuda')),
('BT', 'BTN', '064', _('Bhutan')),
('BO', 'BOL', '068', _('Bolivia')),
('BA', 'BIH', '070', _('Bosnia and Herzegovina')),
('BW', 'BWA', '072', _('Botswana')),
('BV', 'BVT', '074', _('Bouvet Island')),
('BR', 'BRA', '076', _('Brazil')),
('IO', 'IOT', '086', _('British Indian Ocean Territory')),
('BN', 'BRN', '096', _('Brunei Darussalam')),
('BG', 'BGR', '100', _('Bulgaria')),
('BF', 'BFA', '854', _('Burkina Faso')),
('BI', 'BDI', '108', _('Burundi')),
('KH', 'KHM', '116', _('Cambodia')),
('CM', 'CMR', '120', _('Cameroon')),
('CA', 'CAN', '124', _('Canada')),
('CV', 'CPV', '132', _('Cape Verde')),
('KY', 'CYM', '136', _('Cayman Islands')),
('CF', 'CAF', '140', _('Central African Republic')),
('TD', 'TCD', '148', _('Chad')),
('CL', 'CHL', '152', _('Chile')),
('CN', 'CHN', '156', _('China')),
('CX', 'CXR', '162', _('Christmas Island')),
('CC', 'CCK', '166', _('Cocos (Keeling) Islands')),
('CO', 'COL', '170', _('Colombia')),
('KM', 'COM', '174', _('Comoros')),
('CG', 'COG', '178', _('Congo')),
('CD', 'COD', '180', _('Democratic Republic of the Congo')),
('CK', 'COK', '184', _('Cook Islands')),
('CR', 'CRI', '188', _('Costa Rica')),
('CI', 'CIV', '384', _('Cote d\'Ivoire')),
('HR', 'HRV', '191', _('Croatia')),
('CU', 'CUB', '192', _('Cuba')),
('CY', 'CYP', '196', _('Cyprus')),
('CZ', 'CZE', '203', _('Czech Republic')),
('DK', 'DNK', '208', _('Denmark')),
('DJ', 'DJI', '262', _('Djibouti')),
('DM', 'DMA', '212', _('Dominica')),
('DO', 'DOM', '214', _('Dominican Republic')),
('EC', 'ECU', '218', _('Ecuador')),
('EG', 'EGY', '818', _('Egypt')),
('SV', 'SLV', '222', _('El Salvador')),
('GQ', 'GNQ', '226', _('Equatorial Guinea')),
('ER', 'ERI', '232', _('Eritrea')),
('EE', 'EST', '233', _('Estonia')),
('ET', 'ETH', '231', _('Ethiopia')),
('FK', 'FLK', '238', _('Falkland Islands (Malvinas)')),
('FO', 'FRO', '234', _('Faroe Islands')),
('FJ', 'FJI', '242', _('Fiji')),
('FI', 'FIN', '246', _('Finland')),
('FR', 'FRA', '250', _('France')),
('GF', 'GUF', '254', _('French Guiana')),
('PF', 'PYF', '258', _('French Polynesia')),
('TF', 'ATF', '260', _('French Southern and Antarctic Lands')),
('GA', 'GAB', '266', _('Gabon')),
('GM', 'GMB', '270', _('Gambia')),
('GE', 'GEO', '268', _('Georgia')),
('DE', 'DEU', '276', _('Germany')),
('GH', 'GHA', '288', _('Ghana')),
('GI', 'GIB', '292', _('Gibraltar')),
('GR', 'GRC', '300', _('Greece')),
('GL', 'GRL', '304', _('Greenland')),
('GD', 'GRD', '308', _('Grenada')),
('GP', 'GLP', '312', _('Guadeloupe')),
('GU', 'GUM', '316', _('Guam')),
('GT', 'GTM', '320', _('Guatemala')),
('GG', 'GGY', '831', _('Guernsey')),
('GN', 'GIN', '324', _('Guinea')),
('GW', 'GNB', '624', _('Guinea-Bissau')),
('GY', 'GUY', '328', _('Guyana')),
('HT', 'HTI', '332', _('Haiti')),
('HM', 'HMD', '334', _('Heard Island and McDonald Islands')),
    ('VA', 'VAT', '336', _('Vatican City (Holy See)')),
('HN', 'HND', '340', _('Honduras')),
('HK', 'HKG', '344', _('Hong Kong')),
('HU', 'HUN', '348', _('Hungary')),
('IS', 'ISL', '352', _('Iceland')),
('IN', 'IND', '356', _('India')),
('ID', 'IDN', '360', _('Indonesia')),
('IR', 'IRN', '364', _('Iran')),
('IQ', 'IRQ', '368', _('Iraq')),
('IE', 'IRL', '372', _('Ireland')),
('IM', 'IMN', '833', _('Isle of Man')),
('IL', 'ISR', '376', _('Israel')),
('IT', 'ITA', '380', _('Italy')),
('JM', 'JAM', '388', _('Jamaica')),
('JP', 'JPN', '392', _('Japan')),
('JE', 'JEY', '832', _('Jersey')),
('JO', 'JOR', '400', _('Jordan')),
('KZ', 'KAZ', '398', _('Kazakhstan')),
('KE', 'KEN', '404', _('Kenya')),
('KI', 'KIR', '296', _('Kiribati')),
('KP', 'PRK', '408', _('North Korea')),
('KR', 'KOR', '410', _('South Korea')),
('KW', 'KWT', '414', _('Kuwait')),
('KG', 'KGZ', '417', _('Kyrgyzstan')),
    ('LA', 'LAO', '418', _('Laos')),
('LV', 'LVA', '428', _('Latvia')),
('LB', 'LBN', '422', _('Lebanon')),
('LS', 'LSO', '426', _('Lesotho')),
('LR', 'LBR', '430', _('Liberia')),
    ('LY', 'LBY', '434', _('Libya')),
('LI', 'LIE', '438', _('Liechtenstein')),
('LT', 'LTU', '440', _('Lithuania')),
('LU', 'LUX', '442', _('Luxembourg')),
    ('MO', 'MAC', '446', _('Macau')),
('MK', 'MKD', '807', _('Macedonia')),
('MG', 'MDG', '450', _('Madagascar')),
('MW', 'MWI', '454', _('Malawi')),
('MY', 'MYS', '458', _('Malaysia')),
('MV', 'MDV', '462', _('Maldives')),
('ML', 'MLI', '466', _('Mali')),
('MT', 'MLT', '470', _('Malta')),
('MH', 'MHL', '584', _('Marshall Islands')),
('MQ', 'MTQ', '474', _('Martinique')),
('MR', 'MRT', '478', _('Mauritania')),
('MU', 'MUS', '480', _('Mauritius')),
('YT', 'MYT', '175', _('Mayotte')),
('MX', 'MEX', '484', _('Mexico')),
('FM', 'FSM', '583', _('Micronesia')),
('MD', 'MDA', '498', _('Moldova')),
('MC', 'MCO', '492', _('Monaco')),
('MN', 'MNG', '496', _('Mongolia')),
('ME', 'MNE', '499', _('Montenegro')),
('MS', 'MSR', '500', _('Montserrat')),
('MA', 'MAR', '504', _('Morocco')),
('MZ', 'MOZ', '508', _('Mozambique')),
('MM', 'MMR', '104', _('Myanmar')),
('NA', 'NAM', '516', _('Namibia')),
('NR', 'NRU', '520', _('Nauru')),
('NP', 'NPL', '524', _('Nepal')),
('NL', 'NLD', '528', _('Netherlands')),
('AN', 'ANT', '530', _('Netherlands Antilles')),
('NC', 'NCL', '540', _('New Caledonia')),
('NZ', 'NZL', '554', _('New Zealand')),
('NI', 'NIC', '558', _('Nicaragua')),
('NE', 'NER', '562', _('Niger')),
('NG', 'NGA', '566', _('Nigeria')),
('NU', 'NIU', '570', _('Niue')),
    ('NF', 'NFK', '574', _('Norfolk Island')),
('MP', 'MNP', '580', _('Northern Mariana Islands')),
('NO', 'NOR', '578', _('Norway')),
('OM', 'OMN', '512', _('Oman')),
('PK', 'PAK', '586', _('Pakistan')),
('PW', 'PLW', '585', _('Palau')),
('PS', 'PSE', '275', _('Palestinian Territory')),
('PA', 'PAN', '591', _('Panama')),
('PG', 'PNG', '598', _('Papua New Guinea')),
('PY', 'PRY', '600', _('Paraguay')),
('PE', 'PER', '604', _('Peru')),
('PH', 'PHL', '608', _('Philippines')),
('PN', 'PCN', '612', _('Pitcairn Islands')),
('PL', 'POL', '616', _('Poland')),
('PT', 'PRT', '620', _('Portugal')),
('PR', 'PRI', '630', _('Puerto Rico')),
('QA', 'QAT', '634', _('Qatar')),
('RE', 'REU', '638', _('Reunion')),
('RO', 'ROU', '642', _('Romania')),
('RU', 'RUS', '643', _('Russia')),
('RW', 'RWA', '646', _('Rwanda')),
('SH', 'SHN', '654', _('Saint Helena')),
('KN', 'KNA', '659', _('Saint Kitts and Nevis')),
('LC', 'LCA', '662', _('Saint Lucia')),
('PM', 'SPM', '666', _('Saint Pierre and Miquelon')),
('VC', 'VCT', '670', _('Saint Vincent and the Grenadines')),
('WS', 'WSM', '882', _('Samoa')),
('SM', 'SMR', '674', _('San Marino')),
('ST', 'STP', '678', _('Sao Tome and Principe')),
('SA', 'SAU', '682', _('Saudi Arabia')),
('SN', 'SEN', '686', _('Senegal')),
('RS', 'SRB', '688', _('Serbia')),
('SC', 'SYC', '690', _('Seychelles')),
('SL', 'SLE', '694', _('Sierra Leone')),
('SG', 'SGP', '702', _('Singapore')),
('SK', 'SVK', '703', _('Slovakia')),
('SI', 'SVN', '705', _('Slovenia')),
('SB', 'SLB', '090', _('Solomon Islands')),
('SO', 'SOM', '706', _('Somalia')),
('ZA', 'ZAF', '710', _('South Africa')),
('GS', 'SGS', '239', _('South Georgia and the South Sandwich Islands')),
('ES', 'ESP', '724', _('Spain')),
('LK', 'LKA', '144', _('Sri Lanka')),
('SD', 'SDN', '736', _('Sudan')),
('SR', 'SUR', '740', _('Suriname')),
('SJ', 'SJM', '744', _('Svalbard and Jan Mayen')),
('SZ', 'SWZ', '748', _('Swaziland')),
('SE', 'SWE', '752', _('Sweden')),
('CH', 'CHE', '756', _('Switzerland')),
('SY', 'SYR', '760', _('Syria')),
('TW', 'TWN', '158', _('Taiwan')),
('TJ', 'TJK', '762', _('Tajikistan')),
('TZ', 'TZA', '834', _('Tanzania')),
('TH', 'THA', '764', _('Thailand')),
('TL', 'TLS', '626', _('East Timor')),
('TG', 'TGO', '768', _('Togo')),
('TK', 'TKL', '772', _('Tokelau')),
('TO', 'TON', '776', _('Tonga')),
('TT', 'TTO', '780', _('Trinidad and Tobago')),
('TN', 'TUN', '788', _('Tunisia')),
('TR', 'TUR', '792', _('Turkey')),
('TM', 'TKM', '795', _('Turkmenistan')),
('TC', 'TCA', '796', _('Turks and Caicos Islands')),
('TV', 'TUV', '798', _('Tuvalu')),
('UG', 'UGA', '800', _('Uganda')),
('UA', 'UKR', '804', _('Ukraine')),
('AE', 'ARE', '784', _('United Arab Emirates')),
('GB', 'GBR', '826', _('United Kingdom')),
('US', 'USA', '840', _('United States')),
('UM', 'UMI', '581', _('United States Minor Outlying Islands')),
('UY', 'URY', '858', _('Uruguay')),
('UZ', 'UZB', '860', _('Uzbekistan')),
('VU', 'VUT', '548', _('Vanuatu')),
('VE', 'VEN', '862', _('Venezuela')),
    ('VN', 'VNM', '704', _('Vietnam')),
('VG', 'VGB', '092', _('British Virgin Islands')),
('VI', 'VIR', '850', _('United States Virgin Islands')),
('WF', 'WLF', '876', _('Wallis and Futuna')),
('EH', 'ESH', '732', _('Western Sahara')),
('YE', 'YEM', '887', _('Yemen')),
('ZM', 'ZMB', '894', _('Zambia')),
('ZW', 'ZWE', '716', _('Zimbabwe')),
)
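# Illustrative usage (not part of the original module; the names below are
# examples only): each entry is (ISO 3166-1 alpha-2, alpha-3, numeric code,
# translated name), so choice lists or lookup maps can be built directly:
COUNTRY_CHOICES = [(alpha2, name) for alpha2, _alpha3, _num, name in COUNTRIES]
ALPHA3_BY_ALPHA2 = {alpha2: alpha3 for alpha2, alpha3, _num, _name in COUNTRIES}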
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from io import StringIO
import mock
import paramiko
from airflow.models import Connection
from airflow.providers.ssh.hooks.ssh import SSHHook
from airflow.utils import db
from airflow.utils.session import create_session
HELLO_SERVER_CMD = """
import socket, sys
listener = socket.socket()
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('localhost', 2134))
listener.listen(1)
sys.stdout.write('ready')
sys.stdout.flush()
conn = listener.accept()[0]
conn.sendall(b'hello')
"""
def generate_key_string(pkey: paramiko.PKey):
key_fh = StringIO()
pkey.write_private_key(key_fh)
key_fh.seek(0)
key_str = key_fh.read()
return key_str
TEST_PKEY = paramiko.RSAKey.generate(4096)
TEST_PRIVATE_KEY = generate_key_string(pkey=TEST_PKEY)
class TestSSHHook(unittest.TestCase):
CONN_SSH_WITH_PRIVATE_KEY_EXTRA = 'ssh_with_private_key_extra'
CONN_SSH_WITH_EXTRA = 'ssh_with_extra'
@classmethod
def tearDownClass(cls) -> None:
with create_session() as session:
conns_to_reset = [
cls.CONN_SSH_WITH_PRIVATE_KEY_EXTRA,
]
connections = session.query(Connection).filter(Connection.conn_id.in_(conns_to_reset))
connections.delete(synchronize_session=False)
session.commit()
@classmethod
def setUpClass(cls) -> None:
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_EXTRA,
host='localhost',
conn_type='ssh',
extra='{"compress" : true, "no_host_key_check" : "true", '
'"allow_host_key_change": false}'
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_PRIVATE_KEY_EXTRA,
host='localhost',
conn_type='ssh',
extra=json.dumps({
"private_key": TEST_PRIVATE_KEY,
})
)
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_password(self, ssh_mock):
hook = SSHHook(remote_host='remote_host',
port='port',
username='username',
password='password',
timeout=10,
key_file='fake.file')
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
hostname='remote_host',
username='username',
password='password',
key_filename='fake.file',
timeout=10,
compress=True,
port='port',
sock=None
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_without_password(self, ssh_mock):
hook = SSHHook(remote_host='remote_host',
port='port',
username='username',
timeout=10,
key_file='fake.file')
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
hostname='remote_host',
username='username',
key_filename='fake.file',
timeout=10,
compress=True,
port='port',
sock=None
)
@mock.patch('airflow.providers.ssh.hooks.ssh.SSHTunnelForwarder')
def test_tunnel_with_password(self, ssh_mock):
hook = SSHHook(remote_host='remote_host',
port='port',
username='username',
password='password',
timeout=10,
key_file='fake.file')
with hook.get_tunnel(1234):
ssh_mock.assert_called_once_with('remote_host',
ssh_port='port',
ssh_username='username',
ssh_password='password',
ssh_pkey='fake.file',
ssh_proxy=None,
local_bind_address=('localhost', ),
remote_bind_address=('localhost', 1234),
logger=hook.log)
@mock.patch('airflow.providers.ssh.hooks.ssh.SSHTunnelForwarder')
def test_tunnel_without_password(self, ssh_mock):
hook = SSHHook(remote_host='remote_host',
port='port',
username='username',
timeout=10,
key_file='fake.file')
with hook.get_tunnel(1234):
ssh_mock.assert_called_once_with('remote_host',
ssh_port='port',
ssh_username='username',
ssh_pkey='fake.file',
ssh_proxy=None,
local_bind_address=('localhost', ),
remote_bind_address=('localhost', 1234),
host_pkey_directories=[],
logger=hook.log)
def test_conn_with_extra_parameters(self):
ssh_hook = SSHHook(ssh_conn_id=self.CONN_SSH_WITH_EXTRA)
self.assertEqual(ssh_hook.compress, True)
self.assertEqual(ssh_hook.no_host_key_check, True)
self.assertEqual(ssh_hook.allow_host_key_change, False)
@mock.patch('airflow.providers.ssh.hooks.ssh.SSHTunnelForwarder')
def test_tunnel_with_private_key(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_PRIVATE_KEY_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
)
with hook.get_tunnel(1234):
ssh_mock.assert_called_once_with('remote_host',
ssh_port='port',
ssh_username='username',
ssh_pkey=TEST_PKEY,
ssh_proxy=None,
local_bind_address=('localhost',),
remote_bind_address=('localhost', 1234),
host_pkey_directories=[],
logger=hook.log)
def test_ssh_connection(self):
hook = SSHHook(ssh_conn_id='ssh_default')
with hook.get_conn() as client:
# Note - Pylint will fail with no-member here due to https://github.com/PyCQA/pylint/issues/1437
(_, stdout, _) = client.exec_command('ls') # pylint: disable=no-member
self.assertIsNotNone(stdout.read())
def test_ssh_connection_old_cm(self):
with SSHHook(ssh_conn_id='ssh_default') as hook:
client = hook.get_conn()
(_, stdout, _) = client.exec_command('ls')
self.assertIsNotNone(stdout.read())
def test_tunnel(self):
hook = SSHHook(ssh_conn_id='ssh_default')
import subprocess
import socket
subprocess_kwargs = dict(
args=["python", "-c", HELLO_SERVER_CMD],
stdout=subprocess.PIPE,
)
with subprocess.Popen(**subprocess_kwargs) as server_handle, hook.create_tunnel(2135, 2134):
server_output = server_handle.stdout.read(5)
self.assertEqual(b"ready", server_output)
socket = socket.socket()
socket.connect(("localhost", 2135))
response = socket.recv(5)
self.assertEqual(response, b"hello")
socket.close()
server_handle.communicate()
self.assertEqual(server_handle.returncode, 0)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_private_key_extra(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_PRIVATE_KEY_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
)
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
hostname='remote_host',
username='username',
pkey=TEST_PKEY,
timeout=10,
compress=True,
port='port',
sock=None
)
if __name__ == '__main__':
unittest.main()
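# Note (assumption, for running these tests locally): the tests that pass
# ssh_conn_id='ssh_default' expect an Airflow connection with that id to exist.
# Airflow also resolves connections from environment variables, so one hedged
# example setup (placeholder credentials, not values taken from this file) is:
#
#   os.environ["AIRFLOW_CONN_SSH_DEFAULT"] = "ssh://user@localhost:22"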
|
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `logistic.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import logistic
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
RTOL = 1e-2
class Logistic(equivalence.EquivalenceTest, parameterized.TestCase):
"""Logistic tests."""
def setUp(self):
# pylint: disable=too-many-function-args
super().setUp(logistic.Logistic)
self.assertion_fn = lambda x, y: np.testing.assert_allclose(x, y, rtol=RTOL)
@parameterized.named_parameters(
('1d std logistic', (0, 1)),
('2d std logistic', (np.zeros(2), np.ones(2))),
('rank 2 std logistic', (np.zeros((3, 2)), np.ones((3, 2)))),
('broadcasted loc', (0, np.ones(3))),
('broadcasted scale', (np.ones(3), 1)),
)
def test_event_shape(self, distr_params):
super()._test_event_shape(distr_params, dict())
@chex.all_variants
@parameterized.named_parameters(
('1d std logistic, no shape', (0, 1), ()),
('1d std logistic, int shape', (0, 1), 1),
('1d std logistic, 1-tuple shape', (0, 1), (1,)),
('1d std logistic, 2-tuple shape', (0, 1), (2, 2)),
('2d std logistic, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std logistic, int shape', ([0, 0], [1, 1]), 1),
('2d std logistic, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std logistic, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
('rank 2 std logistic, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_shape(distr_params, dict(), sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(
loc=jnp.zeros((), dtype), scale=jnp.ones((), dtype))
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d std logistic, no shape', (0, 1), ()),
('1d std logistic, int shape', (0, 1), 1),
('1d std logistic, 1-tuple shape', (0, 1), (1,)),
('1d std logistic, 2-tuple shape', (0, 1), (2, 2)),
('2d std logistic, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std logistic, int shape', ([0, 0], [1, 1]), 1),
('2d std logistic, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std logistic, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
('rank 2 std logistic, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_and_log_prob(
dist_args=distr_params,
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn)
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0., 1.), np.array([1, 2])),
('1d dist, 2d value as list', (0., 1.), [1, 2]),
('2d dist, 1d value', (np.zeros(2), np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 1), 1),
('2d dist, 2d value', (np.zeros(2), np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_log_prob(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
super()._test_log_prob(distr_params, dict(), value)
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0., 1.), np.array([1, 2])),
('1d dist, 2d value as list', (0., 1.), [1, 2]),
('2d dist, 1d value', (np.zeros(2), np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 1), 1),
('2d dist, 2d value', (np.zeros(2), np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_prob(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
super()._test_attribute(
attribute_string='prob',
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn)
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0., 1.), np.array([1, 2])),
('1d dist, 2d value as list', (0., 1.), [1, 2]),
('2d dist, 1d value', (np.zeros(2), np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 1), 1),
('2d dist, 2d value', (np.zeros(2), np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_cdf(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
super()._test_attribute(
attribute_string='cdf',
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn)
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0., 1.), np.array([1, 2])),
('1d dist, 2d value as list', (0., 1.), [1, 2]),
('2d dist, 1d value', (np.zeros(2), np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 1), 1),
('2d dist, 2d value', (np.zeros(2), np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_log_cdf(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
super()._test_attribute(
attribute_string='log_cdf',
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy', (0., 1.), 'entropy'),
('mean', (0, 1), 'mean'),
('mean from list params', ([-1, 1], [1, 2]), 'mean'),
('variance', (0, 1), 'variance'),
('variance from np params', (np.ones(2), np.ones(2)), 'variance'),
('stddev', (0, 1), 'stddev'),
('stddev from rank 2 params', (np.ones((2, 3)), np.ones(
(2, 3))), 'stddev'),
('mode', (0, 1), 'mode'),
)
def test_method(self, distr_params, function_string):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_attribute(
function_string,
distr_params,
assertion_fn=self.assertion_fn)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', ([0., 1., -0.5], [0.5, 1., 1.5])),
('broadcasted loc', (0.5, [0.5, 1., 1.5])),
('broadcasted scale', ([0., 1., -0.5], 0.8)),
)
def test_median(self, distr_params):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
dist = self.distrax_cls(*distr_params)
self.assertion_fn(self.variant(dist.median)(), dist.mean())
  def test_jittable(self):
super()._test_jittable((0., 1.))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
loc = jnp.array(np.random.randn(3, 4, 5))
scale = jnp.array(np.random.randn(3, 4, 5))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(dist[slice_].mean(), loc[slice_])
def test_slice_different_parameterization(self):
loc = jnp.array(np.random.randn(4))
scale = jnp.array(np.random.randn(3, 4))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(dist[0].loc, loc) # Not slicing loc.
self.assertion_fn(dist[0].scale, scale[0])
if __name__ == '__main__':
absltest.main()
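# Illustrative only (mirrors test_median above; the values are examples): for a
# logistic distribution the median equals the mean, both being `loc`, e.g.
#   dist = logistic.Logistic(loc=0.5, scale=2.0)
#   np.allclose(dist.median(), dist.mean())  # -> True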
|
|
# Copyright 2016 pmrevelle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
from numpy import ones, arange, r_, meshgrid
import osr
from datetime import datetime, timedelta
from dateutil import rrule
import scipy.ndimage as ndimage
from recharge import NUMS, NUMSIZE
from recharge.tools import write_map, read_map
from app.paths import paths
# For Walnut Gulch
C = 243 #2304,NM
R = 74 #3154,NM
def time_interpolation(base_dir, day, finalyear):
year = day.year
#base_dir = 'G:\\Walnut\\Modis\\'
# output 'F:\\ETRM_Inputs\\NDVI_spline\\'
cnt = day.timetuple().tm_yday
i = 0
first_date = 0
while first_date == 0:
# print(i)
if cnt == NUMS[i]:
first_date = NUMS[i]
next_date = NUMS[i + 1]
elif (cnt < NUMS[i]) and (cnt > NUMS[i - 1]):
# print('two')
first_date = NUMS[i - 1]
next_date = NUMS[i]
elif (cnt > NUMS[i]) and (cnt < NUMS[i + 1]):
# print('three')
# print(NUMS[i + 1])
first_date = NUMS[i]
next_date = NUMS[i + 1]
elif (cnt >= 353) and (year == finalyear):
first_date = NUMS[NUMSIZE - 1]
# print('first_date: ', first_date)
next_date = first_date
# print('next_date: ', next_date)
elif cnt >= 353:
first_date = NUMS[NUMSIZE - 1]
# print('first_date: ', first_date)
i = 0
next_date = NUMS[i]
# print('next_date: ', next_date)
i += 1
print('-----------------------------------------------------------------------------')
print('DOY:', cnt)
# print(year)
raster_first_date = datetime(year, 1, 1) + timedelta(first_date - 1)
print('raster first date: ', raster_first_date)
td = timedelta(next_date - 1)
if (cnt >= 353) and (year == finalyear):
newyear = year
raster_next_date = datetime(newyear, 1, 1) + td
elif cnt >= 353:
newyear = year + 1
# print(year)
raster_next_date = datetime(newyear, 1, 1) + td
elif cnt < 353:
newyear = year
raster_next_date = datetime(newyear, 1, 1) + td
rfd = raster_first_date.timetuple()
# tail = '{}_{:02n}_{:02n}.tif'.format(year, rfd.tm_mon, rfd.tm_mday)
tail = '{}{:03n}.tif'.format(year, rfd.tm_yday)
raster_now = os.path.join(base_dir, '{}'.format(tail))
print('First raster to interpolate: ', raster_now)
# resX, resY, cols, rows, Lon, Lat, ndvi, prj, FillVal = read_map(os.path.join(base_dir, raster_now), 'Gtiff')
ndvi = read_map(os.path.join(base_dir, raster_now), 'Gtiff')[6]
rnd = raster_next_date.timetuple()
# tail2 = '{}_{:02n}_{:02n}.tif'.format(newyear, rnd.tm_mon, rnd.tm_mday)
tail2 = '{}{:03n}.tif'.format(newyear, rnd.tm_yday)
raster_next = os.path.join(base_dir, '{}'.format(tail2))
print('Future raster to interpolate with: ', raster_next)
# resX, resY, cols, rows, Lon, Lat, ndvinext, prj, FillVal = read_map(os.path.join(base_dir, raster_next), 'Gtiff')
ndvinext = read_map(os.path.join(base_dir, raster_next), 'Gtiff')[6]
# arr1 = ndvi
# arr2 = ndvinext
    # stack ndvi and ndvinext into a single array of shape (2, R, C)
arr = r_['0,3', ndvi, ndvinext]
# print('arr.shape',arr.shape)
# define the grid coordinates where you want to interpolate
latitude_index = arange(R)
longitude_index = arange(C)
y, x = meshgrid(longitude_index, latitude_index)
# print('X',X)
# print(X.shape)
# print('Y',Y)
# print(Y.shape)
# Setup time variables for interpolation
days_dif = raster_next_date - day
days_dif = float(days_dif.days)
max_days_diff = raster_next_date - raster_first_date
max_days_diff = float(max_days_diff.days)
# proportion = float(days_dif / max_days_diff)
# print('proportion',proportion)
print('day', day)
print('days difference from next ndvi raster', days_dif)
print('out of max days difference', max_days_diff)
if (cnt >= 353) and (year == finalyear):
        interp = 0.0  # first and next rasters coincide here, so avoid dividing by zero below
else:
interp = 1 - (days_dif / max_days_diff) # 1 = weight completely next month values, 0 = previous month
print('interp ratio between monthly images', interp)
# 0.5 corresponds to half way between arr1 and arr2
coordinates = ones((R, C)) * interp, x, y
# coordones = np.ones((2525, 2272)) * interp
# print('coordinates',coordinates)
# print(coordones.shape)
# given arrays, interpolate at coordinates (could be any subset but in this case using full arrays)
newarr = ndimage.map_coordinates(arr, coordinates, order=2)
return newarr
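# A minimal, self-contained sketch (illustrative only, never called by this
# module) of the trick used in time_interpolation() above: two rasters are
# stacked along a new leading axis and ndimage.map_coordinates samples between
# them at a fractional "time" coordinate. The toy shapes and order=1 are
# assumptions; the real code uses the NDVI rasters and order=2.
def _interpolation_sketch():
    import numpy as np
    import scipy.ndimage as ndi
    first = np.zeros((2, 3))           # raster at the earlier composite date
    nxt = np.ones((2, 3))              # raster at the later composite date
    stack = np.r_['0,3', first, nxt]   # shape (2, 2, 3)
    rows, cols = np.meshgrid(np.arange(2), np.arange(3), indexing='ij')
    interp = 0.25                      # 0 = earlier raster, 1 = later raster
    coords = np.ones((2, 3)) * interp, rows, cols
    return ndi.map_coordinates(stack, coords, order=1)  # ~0.25 everywhere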
# def add_one_month(dt0):
# dt1 = dt0.replace(day=1)
# dt2 = dt1 + timedelta(days=32)
# dt3 = dt2.replace(day=1)
# return dt3
#
# def subtract_one_month(dt0):
# dt1 = dt0.replace(day=1)
# dt2 = dt1 - timedelta(days=2)
# dt3 = dt2.replace(day=1)
# return dt3
def main():
paths.build('F:')
startday = datetime(2000, 2, 18, 0)
endday = datetime(2016, 1, 1, 0)
finalyear = 2016
base_dir = 'H:\\Walnut\\Modis\\'#paths.ndvi_individ
output ='H:\\Walnut\\Modis\\InterNew'#paths.ndvi_spline
ref_map = os.path.join(base_dir, 'RR2000049.tif')
_, _, _, _, lon, lat, linke, prj, fill_val = read_map(ref_map, 'Gtiff')
srs = osr.SpatialReference(prj)
sr_wkt = srs.ExportToWkt()
for day in rrule.rrule(rrule.DAILY, dtstart=startday, until=endday):
nr = day.strftime('%j')
year = day.strftime('%Y')
# ndvi_daily = time_interpolation(day, lat, lon, finalyear)
ndvi_daily = time_interpolation(base_dir, day, finalyear)
# Write daily values to new daily rasters
daily_doy = 'ndvi{}_{}.tif'.format(year, nr) # or modis, lol
outpath = os.path.join(output, year)
if not os.path.exists(outpath):
os.makedirs(outpath)
outname = os.path.join(outpath, daily_doy)
write_map(outname, 'Gtiff', lon, lat, ndvi_daily, sr_wkt, fill_val)
if __name__ == '__main__':
main()
# =================================== EOF =========================
# def readMap(fileName, fileFormat):
# """
# read geographical file into memory
#
# :param fileName: Name of the file to read
# :param fileFormat: Gdal format string
# :param logger: logger object
# :return: resX, resY, cols, rows, x, y, data, FillVal
# """
#
# # Open file for binary-reading
#
# mapFormat = gdal.GetDriverByName(fileFormat)
# mapFormat.Register()
# ds = gdal.Open(fileName)
# prj = ds.GetProjection()
# # Retrieve geoTransform info
# geotrans = ds.GetGeoTransform()
# originX = geotrans[0]
# originY = geotrans[3]
# resX = geotrans[1]
# resY = geotrans[5]
# cols = ds.RasterXSize
# rows = ds.RasterYSize
# x = np.linspace(originX + resX / 2, originX + resX / 2 + resX * (cols - 1), cols)
# y = np.linspace(originY + resY / 2, originY + resY / 2 + resY * (rows - 1), rows)
# # Retrieve raster
# RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
# # print(x)
# # print(y)
#
# data = RasterBand.ReadAsArray(0, 0, cols, rows)
# FillVal = RasterBand.GetNoDataValue()
# RasterBand = None
# del ds
# return resX, resY, cols, rows, x, y, data, prj, FillVal
#
#
# def writeMap(fileName, fileFormat, x, y, data, prj, FillVal):
# """
#     Write geographical data into file. Also replace NaN by FillVal
#
# :param fileName:
# :param fileFormat:
# :param x:
# :param y:
# :param data:
# :param FillVal:
# :return:
# """
#
# verbose = False
# gdal.AllRegister()
# driver1 = gdal.GetDriverByName('GTiff')
# driver2 = gdal.GetDriverByName(fileFormat)
#
# data[np.isnan(data)] = FillVal
# # Processing
# if verbose:
# print 'Writing to temporary file ' + fileName + '.tif'
# print "Output format: " + fileFormat
# # Create Output filename from (FEWS) product name and date and open for writing
# TempDataset = driver1.Create(fileName + '.tif', data.shape[1], data.shape[0], 1, gdal.GDT_Float32)
# # Give georeferences
# xul = x[0] - (x[1] - x[0]) / 2
# yul = y[0] + (y[0] - y[1]) / 2
#
# TempDataset.SetGeoTransform([xul, x[1] - x[0], 0, yul, 0, y[1] - y[0]])
# TempDataset.SetProjection(prj)
# # get rasterband entry
# TempBand = TempDataset.GetRasterBand(1)
# # fill rasterband with array
# TempBand.WriteArray(data, 0, 0)
# TempBand.FlushCache()
# TempBand.SetNoDataValue(FillVal)
# # Create data to write to correct format (supported by 'CreateCopy')
# if verbose:
# print 'Writing to ' + fileName
# outDataset = driver2.CreateCopy(fileName, TempDataset, 0)
# TempDataset = None
# outDataset = None
# if verbose:
# print 'Removing temporary file ' + fileName + '.tif'
# os.remove(fileName + '.tif');
#
# if verbose:
# print 'Writing to ' + fileName + ' is done!'
|
|
# -*- coding: UTF-8 -*-
# from machete import __version__
from machete import log
# from machete import log_filename
import shutil
# import errno
import os
from os.path import expanduser
import subprocess
# import stat
import tempfile
IGNORE_PATTERNS = ('*.pyc',)  # note the trailing comma: a one-element tuple, not a bare string
is_chicken = False
windows = os.name == 'nt'
def os_call(path):
log.debug(path)
proc = subprocess.Popen(path.split(' '), stdout=subprocess.PIPE)
(out, err) = proc.communicate()
return out
def replace_infile(lookfor, replace, text_file):
try:
infile = open(text_file)
outfile = open(text_file + '.tmp', 'w')
# replacements = {'zero':'0', 'temp':'bob', 'garbage':'nothing'}
replacements = {lookfor: replace}
for line in infile:
for src, target in replacements.iteritems():
line = line.replace(src, target)
outfile.write(line)
infile.close()
outfile.close()
shutil.move(text_file + '.tmp', text_file)
return True
except Exception, e:
print str(e)
return False
def copytree(src, dst, ignore=shutil.ignore_patterns(*IGNORE_PATTERNS)):
# print('trying to copy from '+src+' to '+dst)
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
lst = os.listdir(src)
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
copytree(s, d, ignore)
else:
shutil.copy2(s, d)
def copy_files(template):
print("Copying files from template...")
try:
if not is_chicken:
run_file = os.path.realpath(__file__)
run_folder = os.path.dirname(run_file)
path = os.path.join(os.path.dirname(run_folder), "templates")
base_path = os.path.join(path, "_base")
template_path = os.path.join(path, template)
copytree(base_path, '.')
copytree(template_path, '.')
return True
except Exception, e:
print str(e)
return False
def git_init():
print("Initializing GIT...")
if not is_chicken:
try:
os_call('git init')
except:
            pass  # that's OK, you can sort of live without git
def git_add_commit():
if not is_chicken:
try:
os_call('git add .')
os_call('git commit . -m "Initial commit"')
except:
            pass  # that's OK, you can sort of live without git
def rename_files(project):
print("Modifying copied files names...")
try:
if not is_chicken:
shutil.move('packagesample', project)
return True
except Exception, e:
print str(e)
return False
def perform_replaces(project):
print("Modifying copied files content...")
try:
if not is_chicken:
cfiles = []
for root, dirs, files in os.walk('.'):
for file in files:
if file.endswith('.py'):
cfiles.append(os.path.join(root, file))
if file.endswith('.rst'):
cfiles.append(os.path.join(root, file))
if file.endswith('.cfg'):
cfiles.append(os.path.join(root, file))
if file.endswith('.in'):
cfiles.append(os.path.join(root, file))
if file.endswith('.venv'):
cfiles.append(os.path.join(root, file))
if file.endswith('.coveragerc'):
cfiles.append(os.path.join(root, file))
for each in cfiles:
suffix = '{{ packagesample'
replace_infile(suffix + ' }}', project, each)
replace_infile(suffix + '.version }}', '0.0.1', each)
replace_infile(suffix + '.release_date }}', '2014', each)
replace_infile(suffix + '.repo_name }}', project, each)
                replace_infile(suffix + '.project_name }}', 'The Project', each)
                replace_infile(suffix + '.full_name }}', 'Your Name', each)
                replace_infile(suffix + '.year }}', '2014', each)
replace_infile('packagesample', project, each)
return True
except Exception, e:
print str(e)
return False
# def has_virtualenv():
# try:
# return 'Usage' in os_call('virtualenv')
# except:
# return False
# def has_virtualenvwrapper():
# if has_virtualenv():
# try:
# if not windows:
# return '/usr' in os_call('which virtualenvwrapper.sh')
# else:
# return False
# except:
# return False
# else:
# return False
def create_venv(project):
print("Creating virtualenv...")
try:
if not is_chicken:
log.debug('Creating virtualenv')
home_dir = expanduser('~').replace('\\', '/')
venv_path = os.path.join(home_dir, '.virtualenvs')
try:
os.makedirs(venv_path)
except:
pass
full_venv_path = os.path.join(venv_path, project)
os_call('virtualenv ' + full_venv_path)
print("Virtualenv created!!!\n")
print("Installing dependencies...")
os_call('vex ' + project + ' pip install -r requirements.txt')
print("Now development depencencies... please wait a bit more.")
os_call('vex ' + project + ' pip install -r requirements-dev.txt')
print("Installed!")
return True
except Exception, e:
print str(e)
return False
def main(template, chicken):
global is_chicken
is_chicken = chicken
if is_chicken:
log.debug('RUNNING ON CHICKEN MODE')
print('*** RUNNING ON CHICKEN MODE ***')
if os.listdir('.'):
message = 'Current directory is not empty, so machete cannot run :-('
log.warn(message)
print(message)
exit(1)
project = os.path.basename(os.getcwd()).replace('-', '_')
# git_init()
if copy_files(template):
if rename_files(project):
if perform_replaces(project):
if create_venv(project):
                    print('\nIt\'s done!')
print('\nRun with "vex ' + project + ' python run.py"')
print('Check for the log file under ' +
tempfile.gettempdir())
|
|
import os
import report
import program_report
import config_report
import target
import paths
def generate(program_run_ids):
__prepare(program_run_ids)
program_runs = {}
for program_run_id in program_run_ids:
program_report.generate_for_completed(program_run_id) # recreate reports
program_runs[program_run_id] = program_report.get_run_data(program_run_id)
__create_page(program_runs)
def __create_page(program_runs):
report_file_path = __report_url(program_runs)
    file = open(report_file_path, 'w')  # open (or create) the report file for writing
file.write(__page_markup(program_runs))
file.close()
print("Wrote combined report to:\n" + report_file_path)
def __dir_name(program_run_ids):
dir_name = ".".join(sorted(program_run_ids))
# limit dir name to 100
return dir_name[:100]
def __report_url(program_runs):
return __report_path(program_runs.keys()) + "/report.html"
def __report_path(program_run_ids):
return paths.combined_program_reports_dir() + "/" + __dir_name(program_run_ids)
def __prepare(program_run_ids):
report_path = __report_path(program_run_ids)
try:
os.mkdir(report_path)
except:
pass # print "Path exists: " + report_path
paths.set_sub_reports_prefix("combined_program_runs/" + __dir_name(program_run_ids))
def __page_markup(program_runs):
markup = report.header("Target report", "For all scenarios: actions/s results vs targets", "")
markup += __program_runs_markup(program_runs)
markup += __combined_results_markup(program_runs)
markup += report.footer()
return markup
def __combined_results_markup(program_runs):
target_chart_scenarios = __target_chart_scenarios(program_runs) # get the scenarios valid for target charts
markup = ""
scenarios_markup = ""
scenarios_markup += __combined_scenarios_summary_markup(target_chart_scenarios, program_runs)
for scenario_name in target_chart_scenarios:
scenarios_markup += __combined_scenario_markup(scenario_name, program_runs)
markup += report.section("Combined action/s charts for scenarios with targets", scenarios_markup)
return markup
def __combined_scenarios_summary_markup(targets_chart_scenarios, program_runs):
__combined_target_chart(targets_chart_scenarios, program_runs)
return report.chart_markup("all_runs_and_targets", "")
def __scenario_chart_name(scenario_name):
return scenario_name + ".combined"
def __combined_scenario_markup(scenario_name, program_runs):
# create a chart for the scenario with targets and one bar for each program_run
__target_chart(scenario_name, program_runs)
# create markup
return report.chart_markup(__scenario_chart_name(scenario_name), "")
def __program_runs_markup(program_runs):
markup = ""
# list program_runs with links to them with a short one line summary
run_list_markup = ""
for program_run_id in program_runs:
run_list_markup += report.link_title(paths.relative_path(__report_url(program_runs), program_report.complete_report_url(program_run_id)), "Program_run " + program_run_id, __program_run_1line_summary(program_runs[program_run_id]))
markup += report.section("Program runs", run_list_markup)
return markup
def __program_run_1line_summary(program_run):
markup = "<div style=\"margin-top: -20px;\">configs:<br/>"
for config_name, config in program_run['program']['configs'].iteritems():
markup += config_name + " (scenarios: "
scenario_strs = []
for scenario_name, scenario in config['scenarios'].iteritems():
scenario_strs.append("%s %.02f%%" % (scenario_name, scenario["percent-of-users"]))
markup += ", ".join(scenario_strs)
markup += ("), %d total users <br/>" % (config["users"]))
return "" + markup + "</div>"
# return "<pre>" + json.dumps(program_run['program']['configs'], indent=4, separators=(',', ': ')) + "</pre><br/>"
def __target_chart_scenarios(program_runs):
scenarios = {}
for program_name, program_run in program_runs.iteritems():
for config_name, config in program_run['program']['configs'].iteritems():
for scenario_name, scenario in config['scenarios'].iteritems():
scenarios[scenario_name] = True
return scenarios
def __combined_target_chart(target_chart_scenarios, program_runs):
bar_args = {}
if len(program_runs) > 5:
bar_args["truncate_legend"] = 30
chart = report.pygal_bar(**bar_args)
# chart.x_labels = map(str, range(2002, 2013))
# chart.add('Firefox', [None, None, 0, 16.6, 25, 31, 36.4, 45.5, 46.3, 42.8, 37.1])
# chart.add('Chrome', [None, None, None, None, None, None, 0, 3.9, 10.8, 23.8, 35.3])
sets = {}
# collect target sets
for target_name, target_arr in target.get_targets().iteritems():
target_title = "Target " + target_name
sets[target_title] = {}
for scenario_name in target_chart_scenarios:
sets[target_title][scenario_name] = target_arr[scenario_name]['actions-per-sec']
for program_run_name, program_run in program_runs.iteritems():
sets[program_run_name] = {}
# collect scenario run for each program sets
for config_name, config_run in program_run['config_runs'].iteritems():
config_params = config_run['config_params']
for scenario_name in config_run['config_params']['scenarios']:
scenario_params = config_params['scenarios'][scenario_name]
sets[program_run_name][scenario_name] = config_report.scenario_peak_core_actions_per_sec_avg(scenario_name, config_run['enriched_metrics'], scenario_params)
chart.title = 'All scenarios: action/s - program comparison'
# make entries
data_sets = {}
x_labels = []
for set_name, set in sets.iteritems():
data_sets[set_name] = {"data":[]}
shared_prefix = __shared_prefix(sets.keys())
for set_name in data_sets:
if shared_prefix:
shortened_set_name = set_name.replace(shared_prefix, "", 1)
else:
shortened_set_name = set_name
set_title = shortened_set_name
data_sets[set_name]["title"] = {"title": set_title, "data":[]}
x_labels = []
for scenario_name in target_chart_scenarios:
x_labels.append(scenario_name)
x_labels = sorted(x_labels)
# arrange data arrays according to sorted scenario_names
for scenario_name in x_labels:
for set_name, _set in sets.iteritems():
if (scenario_name in sets[program_run_name]) and (scenario_name in _set):
data_sets[set_name]["data"].append(_set[scenario_name])
else:
data_sets[set_name]["data"].append(None)
chart.x_labels = map(__format_set_title, x_labels)
    # add chart sets for each target and program_run
sorted_set_names = sorted(data_sets)
for set_name in sorted_set_names:
chart.add(data_sets[set_name]["title"], data_sets[set_name]["data"])
chart.render_to_file(__report_path(program_runs.keys()) + "/all_runs_and_targets.svg")
def __shared_prefix(names):
"""
    Find the prefix shared by all names, skipping "Target" entries; returns None when there is no common prefix.
"""
shared_prefix = None
first_run = True
diff_pos = 0
for name in names:
if "Target" in name or "TARGET" in name:
continue
if first_run: # set the proposed prefix to whole of first name
first_run = False
shared_prefix = name
else:
# find first character which is different
diff_pos = paths.first_diff_pos(shared_prefix, name)
if diff_pos == 0: # if first char is different then no shared_prefix exists
return None
else:
shared_prefix = name[:diff_pos] # cut shared_prefix off at first matching diff
return shared_prefix
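# Example of the intended behaviour (hedged: this assumes paths.first_diff_pos
# returns the index of the first differing character):
#   __shared_prefix(["run_2019_a", "run_2019_b", "Target base"]) -> "run_2019_"
# "Target"/"TARGET" entries are skipped, so target series keep their full
# titles while program-run legend entries can be shortened.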
def __format_set_title(key, max_len = 100):
return (key[:(max_len-2)] + "..") if len(key) > max_len else key
def __target_chart(scenario_name, program_runs):
chart = report.pygal_bar()
entries = {}
# get targets
for target_name, target_arr in target.get_targets().iteritems():
target_title = "Target " + target_name
entries[target_title.upper()] = target_arr[scenario_name]['actions-per-sec']
program_scenarios_processed = {}
# for each config in each program try to find a matching scenario
for program_run_name, program_run in program_runs.iteritems():
program_scenarios_processed[program_run_name] = {}
for config_name, config_run in program_run['config_runs'].iteritems():
if scenario_name in program_scenarios_processed[program_run_name]: # already checked for this program
break
if scenario_name in config_run['config_params']['scenarios']:
config_params = config_run['config_params']
scenario_params = config_params['scenarios'][scenario_name]
# if we have an empty entry here from previous iterations, then remove it, since we have found a valid one
if program_run_name in entries:
entries.pop(program_run_name, None)
program_run_title = "%s (%d vu's)" % (program_run_name, int(config_params['users'] * (scenario_params["percent-of-users"] / 100.0)))
entries[program_run_title] = config_report.scenario_peak_core_actions_per_sec_avg(scenario_name, config_run['enriched_metrics'], scenario_params)
program_scenarios_processed[program_run_name][scenario_name] = True
else:
entries[program_run_name] = None
chart.title = scenario_name + ': action/s - program comparison'
sorted_keys = sorted(entries)
sorted_vals = map(lambda key: entries[key], sorted_keys)
# format x_labels
shared_prefix = __shared_prefix(sorted_keys)
chart.x_labels = map(lambda key: key.replace(shared_prefix, "", 1) if shared_prefix else key, map(__format_set_title, sorted_keys))
chart.add('Actions/s', sorted_vals)
chart.render_to_file(__report_path(program_runs.keys()) + "/" + __scenario_chart_name(scenario_name) + ".svg")
# Alternative implementation: Adding targets as separate data sets:
# # add one data set for each target to visualize targets
# for target_name, target_arr in target.get_targets().iteritems():
# target_title= "Target " + target_name
# target_val = target_arr[scenario_name]['actions-per-sec']
# chart.add(target_title, [target_val])
# for program_run_name, program_run in program_runs.iteritems():
# for config_name, config_run in program_run['config_runs'].iteritems():
# if config_run['config_params']['scenarios'][scenario_name]:
# config_params = config_run['config_params']
# scenario_params = config_params['scenarios'][scenario_name]
# program_run_title = "%s (%d vu's)" % (program_run_name, int(config_params['users'] * (scenario_params["percent-of-users"] / 100.0)))
# entries[program_run_title] = config_report.scenario_peak_core_actions_per_sec_avg(scenario_name, config_run['enriched_metrics'])
# else:
# entries[program_run_name] = None
# chart.title = scenario_name + ': action/s - across runs'
# chart.x_labels = entries.keys()
# chart.add('Actions/s', entries.values())
|
|
# Copyright 2014 Dave Kludt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pitchfork.setup_application import create_app
from pitchfork.config import config
from datetime import datetime
from bson import ObjectId
from uuid import uuid4
import unittest
import urlparse
import json
import mock
import re
class ProductTests(unittest.TestCase):
def setUp(self):
check_db = re.search('_test', config.MONGO_DATABASE)
if not check_db:
test_db = '%s_test' % config.MONGO_DATABASE
else:
test_db = config.MONGO_DATABASE
self.pitchfork, self.db = create_app(test_db)
self.app = self.pitchfork.test_client()
self.app.get('/')
def tearDown(self):
self.db.sessions.remove()
self.db.settings.remove()
self.db.forms.remove()
self.db.api_settings.remove()
self.db.autoscale.remove()
self.db.favorites.remove()
self.db.feedback.remove()
self.db.history.remove()
def setup_user_login(self, sess):
sess['username'] = 'test'
sess['csrf_token'] = 'csrf_token'
sess['role'] = 'logged_in'
sess['_permanent'] = True
sess['ddi'] = '654846'
sess['cloud_token'] = uuid4().hex
def setup_admin_login(self, sess):
sess['username'] = 'oldarmyc'
sess['csrf_token'] = 'csrf_token'
sess['role'] = 'administrators'
sess['_permanent'] = True
sess['ddi'] = '654846'
sess['cloud_token'] = uuid4().hex
def setup_useable_api_call(self, tested=None):
data = {
'api_uri': '/v1.0/{ddi}/groups',
'doc_url': 'http://docs.rackspace.com',
'short_description': 'Test API Call',
'title': 'Test Call',
'verb': 'GET',
'variables': [],
'allow_filter': True,
'group': 'scaling_group'
}
if tested:
data['tested'] = 'True'
insert = self.db.autoscale.insert(data)
return insert
def setup_useable_api_call_with_variables(self):
data = {
'api_uri': '{ddi}/groups',
'doc_url': 'http://docs.rackspace.com',
'short_description': 'Test API Call',
'title': 'Test Call',
'verb': 'GET',
'use_data': True,
'group': 'scaling_group',
'data_object': "{\r\n \"test_var\": \"{test_var_value}\"\r\n}",
'variables': [
{
'field_type': 'text',
'description': 'Test Variable',
'required': True,
'duplicate': True,
'field_display_data': '',
'id_value': 0,
'field_display': 'TextField',
'variable_name': 'test_var_value'
}
]
}
insert = self.db.autoscale.insert(data)
return insert
def setup_useable_feedback(self):
data = {
'category': 'deprecated',
'feedback': 'test feedback info',
'submitted': datetime.now(),
'call_id': '528e098b192a8b99f956e5e7',
'product_url': '/autoscale',
'product_db': 'autoscale',
'complete': False
}
return self.db.feedback.insert(data)
def retrieve_csrf_token(self, data, variable=None):
temp = re.search('id="csrf_token"(.+?)>', data)
token = None
if temp:
temp_token = re.search('value="(.+?)"', temp.group(1))
if temp_token:
token = temp_token.group(1)
if variable:
var_temp = re.search('id="variable_0-csrf_token"(.+?)>', data)
if var_temp:
var_token = re.search('value="(.+?)"', var_temp.group(1))
if var_token:
return token, var_token.group(1)
else:
return token, None
else:
return token, None
else:
return token
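    # Illustrative only (assumed markup, not captured from a real response):
    # retrieve_csrf_token() expects rendered form fields along the lines of
    #   <input id="csrf_token" name="csrf_token" type="hidden" value="abc123">
    #   <input id="variable_0-csrf_token" name="..." type="hidden" value="def456">
    # and would return "abc123" (and additionally "def456" when variable is set).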
""" Product Management Autoscale - Perms Test """
def test_pf_autoscale_manage_admin_perms(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/manage')
assert response._status_code == 200, (
'Invalid response code %s' % response._status_code
)
self.assertIn(
'Manage Settings',
response.data,
'Did not find correct HTML on page'
)
def test_pf_autoscale_manage_admin_perms_no_settings(self):
self.db.api_settings.remove()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/manage')
assert response._status_code == 200, (
'Invalid response code %s' % response._status_code
)
self.assertIn(
'Manage Settings',
response.data,
'Did not find correct HTML on page'
)
def test_pf_autoscale_manage_user_perms(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
response = c.get('/autoscale/manage')
assert response._status_code == 302, (
'Invalid response code %s' % response._status_code
)
location = response.headers.get('Location')
o = urlparse.urlparse(location)
self.assertEqual(
o.path,
'/',
'Invalid redirect location %s, expected "/"' % o.path
)
""" Functional Tests """
def test_pf_autoscale_manage_add_update(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/manage')
token = self.retrieve_csrf_token(response.data)
data = {
'csrf_token': token,
'title': 'Test',
'app_url': '/test',
'us_api': 'http://us.test.com',
'uk_api': 'http://uk.test.com',
'doc_url': 'http://doc.test.com',
'require_dc': True,
'active': True
}
response = c.post(
'/autoscale/manage',
data=data,
follow_redirects=True
)
self.assertIn(
'Product was successfully updated',
response.data,
'Incorrect flash message after add data'
)
api_settings = self.db.api_settings.find_one()
autoscale = api_settings.get('autoscale')
updated = False
if autoscale.get('title') == 'Test':
updated = True
assert updated, 'Product was not updated successfully'
def test_pf_autoscale_manage_add_update_disable(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/manage')
token = self.retrieve_csrf_token(response.data)
data = {
'csrf_token': token,
'title': 'Test',
'app_url': '/test',
'us_api': 'http://us.test.com',
'uk_api': 'http://us.test.com',
'doc_url': 'http://doc.test.com',
'require_dc': True
}
response = c.post(
'/autoscale/manage',
data=data,
follow_redirects=True
)
self.assertIn(
'Product was successfully updated',
response.data,
'Incorrect flash message after data update'
)
api_settings = self.db.api_settings.find_one()
autoscale = api_settings.get('autoscale')
updated = False
if autoscale.get('title') == 'Test':
updated = True
assert updated, 'Product was not updated successfully'
active_products = api_settings.get('active_products')
not_active = False
if 'autoscale' not in active_products:
not_active = True
assert not_active, 'Product was not removed from active products'
def test_pf_autoscale_manage_add_bad_data(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
data = {
'title': 'Test',
'app_url': '/test',
'us_api': 'http://us.test.com',
'uk_api': 'http://us.test.com',
'doc_url': 'http://doc.test.com',
'require_dc': True,
'active': True
}
response = c.post(
'/autoscale/manage',
data=data,
follow_redirects=True
)
self.assertIn(
'Form was not saved successfully',
response.data,
'Incorrect flash message after add bad data'
)
calls = self.db.autoscale.find()
assert calls.count() == 0, 'Call added when it should not have been'
def test_pf_autoscale_reorder_groups_demote(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
before = self.db.api_settings.find_one(
{'autoscale.groups.slug': 'scaling_groups'},
{'autoscale.groups.$': 1}
)
response = c.get(
'/autoscale/groups/scaling_groups/demote',
)
assert response.status_code == 200, 'Incorrect status code returned'
after = self.db.api_settings.find_one(
{'autoscale.groups.slug': 'scaling_groups'},
{'autoscale.groups.$': 1}
)
before_group = before.get('autoscale').get('groups')[0].get('order')
after_group = after.get('autoscale').get('groups')[0].get('order')
assert before_group + 1 == after_group, 'Incorrect order after demote'
def test_pf_autoscale_reorder_groups_promote(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
before = self.db.api_settings.find_one(
{'autoscale.groups.slug': 'configurations'},
{'autoscale.groups.$': 1}
)
response = c.get(
'/autoscale/groups/configurations/promote',
)
assert response.status_code == 200, 'Incorrect status code returned'
after = self.db.api_settings.find_one(
{'autoscale.groups.slug': 'configurations'},
{'autoscale.groups.$': 1}
)
before_group = before.get('autoscale').get('groups')[0].get('order')
after_group = after.get('autoscale').get('groups')[0].get('order')
        assert before_group - 1 == after_group, 'Incorrect order after promote'
def test_pf_autoscale_reorder_groups_bad_product(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/bad_product/groups/configurations/promote',
)
assert response.status_code == 400, 'Incorrect status code returned'
""" Product API Management Autoscale - Perms Test """
def test_pf_autoscale_manage_api_admin_perms(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
c.get('/')
response = c.get('/autoscale/manage/api')
assert response._status_code == 200, (
'Invalid response code %s' % response._status_code
)
self.assertIn(
'Autoscale - API Calls',
response.data,
'Did not find correct HTML on page'
)
def test_pf_autoscale_manage_api_user_perms(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
response = c.get('/autoscale/manage/api')
assert response._status_code == 302, (
'Invalid response code %s' % response._status_code
)
location = response.headers.get('Location')
o = urlparse.urlparse(location)
self.assertEqual(
o.path,
'/',
'Invalid redirect location %s, expected "/"' % o.path
)
""" API Add """
def test_pf_autoscale_manage_api_add_admin_perms(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/manage/api/add')
assert response._status_code == 200, (
'Invalid response code %s' % response._status_code
)
self.assertIn(
'Add API Call',
response.data,
'Did not find correct HTML on page'
)
def test_pf_autoscale_manage_api_add_user_perms(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
response = c.get('/autoscale/manage/api/add')
assert response._status_code == 302, (
'Invalid response code %s' % response._status_code
)
location = response.headers.get('Location')
o = urlparse.urlparse(location)
self.assertEqual(
o.path,
'/',
'Invalid redirect location %s, expected "/"' % o.path
)
def test_pf_autoscale_manage_api_add_admin_post_dupe_title(self):
self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/manage/api/add')
token = self.retrieve_csrf_token(response.data)
data = {
'csrf_token': token,
'title': 'Test Call',
'doc_url': 'http://docs.rackspace.com',
'verb': 'GET',
'api_uri': '{ddi}/groups'
}
response = c.post('/autoscale/manage/api/add', data=data)
self.assertIn(
'Form validation error, please check the form and try again',
response.data,
'Could not find error alert on page'
)
self.assertIn(
'Duplicate title found',
response.data,
'Bad message when submitting duplicate title'
)
calls = self.db.autoscale.find()
assert calls.count() == 1, 'Found too many calls in database'
def test_pf_autoscale_manage_api_add_admin_post_dupe_url(self):
self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/manage/api/add')
token = self.retrieve_csrf_token(response.data)
data = {
'csrf_token': token,
'title': 'Dupe Call',
'doc_url': 'http://docs.rackspace.com',
'verb': 'GET',
'api_uri': '/v1.0/{ddi}/groups'
}
response = c.post('/autoscale/manage/api/add', data=data)
self.assertIn(
'Form validation error, please check the form and try again',
response.data,
'Could not find error alert on page'
)
self.assertIn(
'Duplicate URI, Verb, and Doc combination',
response.data,
'Bad message when submitting duplicate url and verb'
)
calls = self.db.autoscale.find()
assert calls.count() == 1, 'Found too many calls in database'
def test_pf_autoscale_manage_api_add_admin_post_good(self):
self.db.autoscale.remove()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/add'
)
token = self.retrieve_csrf_token(response.data)
data = {
'csrf_token': token,
'title': 'Add Call',
'doc_url': 'http://docs.rackspace.com',
'verb': 'GET',
'api_uri': '/v1.0/{ddi}/groups',
'group': 'add_new_group',
'new_group': 'Test Group',
'product': 'autoscale'
}
response = c.post(
'/autoscale/manage/api/add',
data=data,
follow_redirects=True
)
self.assertIn(
'API Call was added successfully',
response.data,
'Bad message when submitting good call'
)
calls = self.db.autoscale.find()
found = self.db.autoscale.find_one()
assert calls.count() == 1, 'Incorrect count of calls'
assert found.get('group') == 'test_group', (
'Group not found or incorrect group'
)
group = self.db.api_settings.find_one(
{'autoscale.groups.slug': 'test_group'}
)
assert group, 'Could not find added group'
def test_pf_autoscale_manage_api_add_admin_post_no_db(self):
self.db.autoscale.remove()
self.db.api_settings.update(
{}, {
'$set': {'autoscale.db_name': None}
}
)
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/add'
)
token = self.retrieve_csrf_token(response.data)
data = {
'csrf_token': token,
'title': 'Add Call',
'doc_url': 'http://docs.rackspace.com',
'verb': 'GET',
'api_uri': '{ddi}/groups'
}
response = c.post(
'/autoscale/manage/api/add',
data=data,
follow_redirects=True
)
self.assertIn(
'There was an issue storing the API Call. Check '
'the product and ensure the db_name is specified',
response.data,
'Bad message when submitting call without DB'
)
found_call = self.db.autoscale.find()
assert found_call.count() == 0, 'No calls should have been found'
def test_pf_autoscale_manage_api_add_post_with_vars(self):
self.db.autoscale.remove()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/add'
)
token = self.retrieve_csrf_token(response.data)
data = {
'csrf_token': token,
'title': 'Add Call',
'doc_url': 'http://docs.rackspace.com',
'verb': 'GET',
'api_uri': '/v1.0/{ddi}/groups',
'group': 'add_new_group',
'new_group': 'Test Group',
'product': 'autoscale',
'variable_0-description': 'Test variable',
'variable_0-duplicate': 'y',
'variable_0-field_display': 'TextField',
'variable_0-field_type': 'text',
'variable_0-variable_name': 'test_var',
'variable_0-field_display_data': '',
'variable_0-required': 'y'
}
response = c.post(
'/autoscale/manage/api/add',
data=data,
follow_redirects=True
)
self.assertIn(
'API Call was added successfully',
response.data,
'Bad message when submitting good call'
)
calls = self.db.autoscale.find()
found = self.db.autoscale.find_one()
assert calls.count() == 1, 'Incorrect count of calls'
assert found.get('group') == 'test_group', (
'Group not found or incorrect group'
)
group = self.db.api_settings.find_one(
{'autoscale.groups.slug': 'test_group'}
)
assert group, 'Could not find added group'
assert len(found.get('variables')) == 1, 'Incorrect variable length'
variable = found.get('variables')[0]
assert variable.get('required'), 'Variable should be required'
assert variable.get('duplicate'), 'Variable should be a duplicate'
""" Edit API Call """
def test_pf_autoscale_manage_api_edit_user_perms(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
response = c.get(
'/autoscale/manage/api/edit/%s' % api_id
)
assert response._status_code == 302, (
'Invalid response code %s' % response._status_code
)
location = response.headers.get('Location')
o = urlparse.urlparse(location)
self.assertEqual(
o.path,
'/',
'Invalid redirect location %s, expected "/"' % o.path
)
def test_pf_autoscale_manage_api_edit_admin_perms(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/edit/%s' % api_id
)
assert response._status_code == 200, (
'Invalid response code %s' % response._status_code
)
self.assertIn(
'Edit API Call',
response.data,
'Invalid HTML found when browsing to edit'
)
def test_pf_autoscale_manage_api_edit_admin_perms_with_vars(self):
self.setup_useable_api_call_with_variables()
call = self.db.autoscale.find_one()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/edit/%s' % call.get('_id')
)
self.assertIn(
'Edit API Call',
response.data,
'Invalid HTML found when browsing to edit'
)
self.assertIn(
call.get('title'),
response.data,
'Could not find correct title in edit form'
)
self.assertIn(
call.get('doc_url'),
response.data,
'Could not find correct Document URL in edit form'
)
def test_pf_autoscale_manage_api_edit_admin_bad_post(self):
self.setup_useable_api_call()
call = self.db.autoscale.find_one()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
data = {
'api_uri': '{ddi}/groups',
'doc_url': 'http://docs.rackspace.com',
'short_description': 'Test API Call',
'title': 'Test Call',
'verb': 'GET',
'variables': []
}
response = c.post(
'/autoscale/manage/api/edit/%s' % call.get('_id'),
data=data,
follow_redirects=True
)
self.assertIn(
'Form validation error, please check the form and try again',
response.data,
'Incorrect flash message after adding bad data'
)
self.assertIn(
'Edit API Call',
response.data,
'Invalid HTML found when browsing to edit'
)
check_call = self.db.autoscale.find_one()
assert call == check_call, (
'Call was edited when it should not have been'
)
def test_pf_autoscale_manage_api_edit_admin_good_post(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/edit/%s' % api_id
)
token, var_token = self.retrieve_csrf_token(response.data, True)
data = {
'csrf_token': token,
'title': 'Test Update Call',
'short_description': 'Test Setup',
'doc_url': 'http://docs.rackspace.com',
'verb': 'GET',
'api_uri': '{ddi}/groups'
}
response = c.post(
'/autoscale/manage/api/edit/%s' % api_id,
data=data,
follow_redirects=True
)
self.assertIn(
'API Call was successfully updated',
response.data,
'Incorrect flash message after successful edit'
)
assert response._status_code == 200, (
'Invalid response code %s' % response._status_code
)
calls = self.db.autoscale.find_one(
{
'title': 'Test Update Call'
}
)
assert calls, 'Could not find updated call'
""" Set testing for API Call """
def test_pf_autoscale_manage_api_mark_tested(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/confirm/%s' % api_id,
follow_redirects=True
)
self.assertIn(
'API call was successfully updated',
response.data,
'Invalid response found after marking tested'
)
check_call = self.db.autoscale.find_one({'_id': ObjectId(api_id)})
assert check_call.get('tested'), 'API call was not saved as tested'
def test_pf_autoscale_manage_api_mark_untested(self):
api_id = self.setup_useable_api_call(True)
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/unconfirm/%s' % api_id,
follow_redirects=True
)
self.assertIn(
'API call was successfully updated',
response.data,
'Invalid response found after marking untested'
)
check_call = self.db.autoscale.find_one({'_id': ObjectId(api_id)})
assert not check_call.get('tested'), (
'API call was not marked as untested'
)
""" Delete Call """
def test_pf_autoscale_manage_api_delete_valid(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/delete/%s' % api_id,
follow_redirects=True
)
self.assertIn(
'API call was successfully removed',
response.data,
'Invalid response found after deleting call'
)
api_call = self.db.autoscale.find()
self.assertEqual(
api_call.count(),
0,
'Invalid api count found %d and should be 0' % api_call.count()
)
def test_pf_autoscale_manage_api_delete_invalid(self):
api_id = self.setup_useable_api_call()
self.db.autoscale.remove()
self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get(
'/autoscale/manage/api/delete/%s' % api_id,
follow_redirects=True
)
self.assertIn(
'API call was not found and nothing removed',
response.data,
'Invalid response found after invalid deletion'
)
api_call = self.db.autoscale.find()
self.assertEqual(
api_call.count(),
1,
'Invalid api count found %d and should be 1' % api_call.count()
)
""" Testing Product front """
def test_pf_autoscale_api_admin_perms_testing(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/testing')
assert response._status_code == 200, (
'Invalid response code %s' % response._status_code
)
self.assertIn(
'Autoscale - Testing API Calls',
response.data,
'Did not find correct HTML on page'
)
def test_pf_autoscale_api_admin_perms_testing_no_settings(self):
self.db.api_settings.remove()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/testing')
assert response._status_code == 302, (
'Invalid response code %s' % response._status_code
)
response = c.get(
'/autoscale/testing',
follow_redirects=True
)
self.assertIn(
'Product not found, please check the URL and try again',
response.data,
'Did not find correct error message on page'
)
def test_pf_autoscale_api_user_perms_testing(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
response = c.get('/autoscale/testing')
assert response._status_code == 302, (
'Invalid response code %s' % response._status_code
)
location = response.headers.get('Location')
o = urlparse.urlparse(location)
self.assertEqual(
o.path,
'/',
'Invalid redirect location %s, expected "/"' % o.path
)
""" Front product View """
def test_pf_autoscale_api_admin_perms(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/')
assert response._status_code == 200, (
'Invalid response code %s' % response._status_code
)
self.assertIn(
'Autoscale',
response.data,
'Did not find correct HTML on page'
)
def test_pf_autoscale_api_not_active(self):
temp = {
'app_url': '/autoscale',
'db_name': 'autoscale',
'doc_url': (
'http://docs.rackspace.com/cas/api/v1.0/autosca'
'le-devguide/content/Overview.html'
),
'title': 'Autoscale',
'active': False,
'require_region': True,
'uk_api': 'https://{region}.autoscale.api.rackspacecloud.com',
'us_api': 'https://{region}.autoscale.api.rackspacecloud.com'
}
self.db.api_settings.update({}, {'$set': {'autoscale': temp}})
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/')
assert response.status_code == 302, (
'Invalid response code %s' % response.status_code
)
response = c.get(
'/autoscale/',
follow_redirects=True
)
assert response.status_code == 200, (
'Invalid response code %s' % response.status_code
)
self.assertIn(
'Product is not active and cannot be used at this time',
response.data.decode('utf-8'),
'Did not find correct HTML on page'
)
def test_pf_autoscale_api_user_perms(self):
self.setup_useable_api_call_with_variables()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
response = c.get('/autoscale/')
assert response._status_code == 200, (
'Invalid response code %s' % response._status_code
)
self.assertIn(
'Autoscale',
response.data,
'Did not find correct HTML on page'
)
self.assertIn(
'duplicate-field',
response.data.decode('utf-8'),
'Could not find expected class on page'
)
def test_pf_autoscale_api_admin_perms_no_settings(self):
self.db.api_settings.remove()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/autoscale/')
assert response._status_code == 302, (
'Invalid response code %s' % response._status_code
)
response = c.get('/autoscale/', follow_redirects=True)
self.assertIn(
'Product not found, please check the URL and try again',
response.data,
'Did not find correct HTML on page'
)
def test_pf_autoscale_api_user_perms_no_settings(self):
self.db.api_settings.remove()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
response = c.get('/autoscale/')
assert response._status_code == 302, (
'Invalid response code %s' % response._status_code
)
response = c.get('/autoscale/', follow_redirects=True)
self.assertIn(
'Product not found, please check the URL and try again',
response.data,
'Did not find correct HTML on page'
)
""" Send Request to process """
def test_pf_autoscale_post_call(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
data = {
'api_verb': 'GET',
'testing': False,
'api_url': '/v1.0/{ddi}/groups',
'api_token': 'test_token',
'api_id': str(api_id),
'ddi': '123456',
'data_center': 'dfw',
'add_filter': 'limit=100'
}
with mock.patch('requests.get') as patched_get:
type(patched_get.return_value).content = mock.PropertyMock(
return_value='{"groups_links": [], "groups": []}'
)
type(patched_get.return_value).status_code = mock.PropertyMock(
return_value=200
)
type(patched_get.return_value).headers = mock.PropertyMock(
return_value=(
'{"via": "1.1 Repose (Repose/2.12)",'
'"x-response-id": "a10adb69-4d9f-4457-'
'bda4-e2429f334895",'
'"transfer-encoding": "chunked",'
'"server": "Jetty(8.0.y.z-SNAPSHOT)",'
'"date": "Tue, 18 Mar 2014 19:52:26 GMT",'
'"content-type": "application/json"}'
)
)
response = c.post(
'/autoscale/api/call/process',
data=json.dumps(data),
content_type='application/json'
)
data = json.loads(response.data)
assert data.get('response_code'), 'No response code received'
assert data.get('api_url'), 'API URL was not found'
assert data.get('response_headers'), (
'No response headers were received'
)
assert data.get('response_body'), 'No response content was received'
assert data.get('request_headers'), 'No request headers were received'
assert data.get('response_code'), (
'No response status code was received'
)
self.assertIn(
'.autoscale.api.rackspacecloud.com/v1.0/123456/groups?limit=100',
data.get('api_url'),
'Did not find the limit attached to the end of the URI'
)
def test_pf_autoscale_post_call_testing(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
data = {
'api_verb': 'GET',
'api_url': '{ddi}/groups',
'api_token': 'test_token',
'api_id': str(api_id),
'ddi': '123456',
'data_center': 'dfw',
'testing': True
}
with mock.patch('requests.get') as patched_get:
type(patched_get.return_value).content = mock.PropertyMock(
return_value='{"groups_links": [], "groups": []}'
)
type(patched_get.return_value).status_code = mock.PropertyMock(
return_value=200
)
type(patched_get.return_value).headers = mock.PropertyMock(
return_value=(
'{"via": "1.1 Repose (Repose/2.12)",'
'"x-response-id": "a10adb69-4d9f-4457-'
'bda4-e2429f334895",'
'"transfer-encoding": "chunked",'
'"server": "Jetty(8.0.y.z-SNAPSHOT)",'
'"date": "Tue, 18 Mar 2014 19:52:26 GMT",'
'"content-type": "application/json"}'
)
)
response = c.post(
'/autoscale/api/call/process',
data=json.dumps(data),
content_type='application/json'
)
data = json.loads(response.data)
assert data.get('response_code'), 'No response code received'
assert data.get('api_url'), 'API URL was not found'
assert data.get('response_headers'), (
'No response headers were received'
)
assert data.get('response_body'), 'No response content was received'
assert data.get('request_headers'), 'No request headers were received'
assert data.get('response_code'), (
'No response status code was received'
)
def test_pf_autoscale_post_call_testing_uk(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
data = {
'api_verb': 'GET',
'api_url': '{ddi}/groups',
'api_token': 'test_token',
'api_id': str(api_id),
'ddi': '123456',
'data_center': 'lon',
'testing': True
}
with mock.patch('requests.get') as patched_get:
type(patched_get.return_value).content = mock.PropertyMock(
return_value='{"groups_links": [], "groups": []}'
)
type(patched_get.return_value).status_code = mock.PropertyMock(
return_value=200
)
type(patched_get.return_value).headers = mock.PropertyMock(
return_value=(
'{"via": "1.1 Repose (Repose/2.12)",'
'"x-response-id": "a10adb69-4d9f-4457-'
'bda4-e2429f334895",'
'"transfer-encoding": "chunked",'
'"server": "Jetty(8.0.y.z-SNAPSHOT)",'
'"date": "Tue, 18 Mar 2014 19:52:26 GMT",'
'"content-type": "application/json"}'
)
)
response = c.post(
'/autoscale/api/call/process',
data=json.dumps(data),
content_type='application/json'
)
data = json.loads(response.data)
assert data.get('response_code'), 'No response code received'
assert data.get('api_url'), 'API URL was not found'
assert data.get('response_headers'), (
'No response headers were received'
)
assert data.get('response_body'), 'No response content was received'
assert data.get('request_headers'), 'No request headers were received'
assert data.get('response_code'), (
'No response status code was received'
)
def test_pf_autoscale_post_call_bad_response(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
data = {
'api_verb': 'GET',
'testing': False,
'api_url': '{ddi}/groups',
'api_token': 'test_token',
'api_id': str(api_id),
'ddi': '123456',
'data_center': 'dfw'
}
with mock.patch('requests.get') as patched_get:
type(patched_get.return_value).content = mock.PropertyMock(
return_value=''
)
type(patched_get.return_value).status_code = mock.PropertyMock(
return_value=401
)
type(patched_get.return_value).headers = mock.PropertyMock(
return_value=(
'{"via": "1.1 Repose (Repose/2.12)",'
'"x-response-id": "a10adb69-4d9f-4457-'
'bda4-e2429f334895",'
'"transfer-encoding": "chunked",'
'"server": "Jetty(8.0.y.z-SNAPSHOT)",'
'"date": "Tue, 18 Mar 2014 19:52:26 GMT",'
'"content-type": "application/json"}'
)
)
response = c.post(
'/autoscale/api/call/process',
data=json.dumps(data),
content_type='application/json'
)
data = json.loads(response.data)
assert data.get('response_code'), 'No response code received'
assert data.get('api_url'), 'API URL was not found'
assert data.get('response_headers'), (
'No response headers were received'
)
assert data.get('response_body'), 'No response content was received'
assert data.get('request_headers'), 'No request headers were received'
assert data.get('response_code'), (
'No response status code was received'
)
def test_pf_autoscale_post_call_with_data(self):
api_id = self.setup_useable_api_call_with_variables()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
data = {
'api_verb': 'POST',
'testing': 'false',
'api_url': '{ddi}/groups',
'api_token': 'test_token',
'api_id': str(api_id),
'ddi': '123456',
'data_center': 'dfw',
'test_var_value': 'Test Group'
}
with mock.patch('requests.post') as ppost:
type(ppost.return_value).content = mock.PropertyMock(
return_value='{"groups_links": [], "groups": []}'
)
type(ppost.return_value).status_code = mock.PropertyMock(
return_value=200
)
type(ppost.return_value).headers = mock.PropertyMock(
return_value=(
'{"via": "1.1 Repose (Repose/2.12)",\
"x-response-id": "response_id",\
"transfer-encoding": "chunked",\
"server": "Jetty(8.0.y.z-SNAPSHOT)",\
"date": "Tue, 18 Mar 2014 19:52:26 GMT",\
"content-type": "application/json"}'
)
)
response = c.post(
'/autoscale/api/call/process',
data=json.dumps(data),
content_type='application/json'
)
data = json.loads(response.data)
assert data.get('response_code'), 'No response code received'
assert data.get('api_url'), 'API URL was not found'
assert data.get('response_headers'), (
'No response headers were received'
)
assert data.get('response_body'), 'No response content was received'
assert data.get('request_headers'), 'No request headers were received'
assert data.get('response_code'), (
'No response status code was received'
)
assert data.get('data_package'), (
'No response data package was received'
)
assert self.db.history.count() == 1, (
'Could not find call logged in history'
)
def test_pf_mark_favorite_add(self):
api_id = self.setup_useable_api_call(True)
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
data = {'call_id': str(api_id)}
response = c.post(
'/autoscale/favorites/add',
data=json.dumps(data),
content_type='application/json'
)
fav_response = c.get('/favorites')
prod_response = c.get('/autoscale/')
data = json.loads(response.data)
assert data.get('code') == 200, 'Incorrect response code received'
favorites = self.db.favorites.find_one({'username': 'test'})
assert favorites, 'No favorites found'
assert favorites.get('favorites')[0].get('call_id') == str(api_id), (
'Did not find call ID in favorites'
)
assert fav_response.status_code == 200, 'Incorrect status returned'
self.assertIn(
'Test Call',
fav_response.data,
'Could not find call title in favorites'
)
assert prod_response.status_code == 200, 'Incorrect status found'
def test_pf_mark_favorite_remove(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
data = {'call_id': str(api_id)}
response = c.post(
'/autoscale/favorites/add',
data=json.dumps(data),
content_type='application/json'
)
response = c.post(
'/autoscale/favorites/remove',
data=json.dumps(data),
content_type='application/json'
)
data = json.loads(response.data)
assert data.get('code') == 200, 'Incorrect response code received'
favorites = self.db.favorites.find_one({'username': 'test'})
assert favorites, 'No favorites found'
assert len(favorites.get('favorites')) == 0, (
'Favorites not empty'
)
def test_pf_mark_favorite_remove_no_prior(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
data = {'call_id': str(api_id)}
response = c.post(
'/autoscale/favorites/remove',
data=json.dumps(data),
content_type='application/json'
)
data = json.loads(response.data)
assert data.get('code') == 200, 'Incorrect response code received'
favorites = self.db.favorites.find_one({'username': 'test'})
assert favorites is None, 'Favorites entry should not have been created'
def test_pf_mark_favorite_bad_action(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
data = {'call_id': str(api_id)}
response = c.post(
'/autoscale/favorites/bad_action',
data=json.dumps(data),
content_type='application/json'
)
assert response.status_code == 404, 'Incorrect response code received'
favorites = self.db.favorites.find_one({'username': 'test'})
assert favorites is None, 'Favorites entry should not have been created'
def test_pf_mark_favorite_bad_call(self):
api_id = self.setup_useable_api_call()
self.db.autoscale.remove()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
data = {'call_id': str(api_id)}
response = c.post(
'/autoscale/favorites/add',
data=json.dumps(data),
content_type='application/json'
)
assert response.status_code == 404, 'Incorrect response code received'
favorites = self.db.favorites.find_one({'username': 'test'})
assert favorites is None, 'Favorites entry should not have been created'
def test_pf_mark_favorite_bad_product(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
data = {'call_id': str(api_id)}
response = c.post(
'/bad_product/favorites/add',
data=json.dumps(data),
content_type='application/json'
)
assert response.status_code == 404, 'Incorrect response code received'
favorites = self.db.favorites.find_one({'username': 'test'})
assert favorites is None, 'Favorites entry should not have been created'
""" Feedback """
def test_pf_feedback_add(self):
api_id = self.setup_useable_api_call()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
data = {
'call_id': str(api_id),
'category': 'doc_link',
'feedback': 'Testing feedback',
'product_db': 'autoscale'
}
response = c.post(
'/feedback/',
data=json.dumps(data),
content_type='application/json'
)
data = json.loads(response.data)
assert data.get('code') == 200, 'Incorrect response code received'
feedback = self.db.feedback.find_one()
assert feedback, 'No feedback items found'
assert feedback.get('call_id') == str(api_id), (
'Did not find call ID in feedback'
)
def test_pf_feedback_mark_invalid_item(self):
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
data = {'feedback_id': 'bad_id'}
response = c.put(
'/feedback/',
data=json.dumps(data),
content_type='application/json'
)
assert response.status_code == 400, 'Incorrect response code received'
def test_pf_feedback_user_permissions_check(self):
feedback_id = self.setup_useable_feedback()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
data = {'feedback_id': str(feedback_id)}
response = c.put(
'/feedback/',
data=json.dumps(data),
content_type='application/json'
)
assert response.status_code == 404, 'Incorrect response code received'
feedback = self.db.feedback.find_one()
assert feedback, 'No feedback items found'
assert feedback.get('_id') == feedback_id, (
'Did not find correct feedback item'
)
assert feedback.get('complete') is False, (
'Feedback marked complete when it should not have been'
)
def test_pf_feedback_mark_complete(self):
feedback_id = self.setup_useable_feedback()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
data = {'feedback_id': str(feedback_id)}
response = c.put(
'/feedback/',
data=json.dumps(data),
content_type='application/json',
)
data = json.loads(response.data)
assert data.get('code') == 200, 'Incorrect response code received'
feedback = self.db.feedback.find_one()
assert feedback, 'No feedback items found'
assert feedback.get('_id') == feedback_id, (
'Did not find correct feedback item'
)
assert feedback.get('complete'), (
'Feedback not marked complete as expected'
)
def test_pf_feedback_view(self):
self.setup_useable_feedback()
with self.app as c:
with c.session_transaction() as sess:
self.setup_admin_login(sess)
response = c.get('/feedback/')
self.assertIn(
'Submitted Feedback',
response.data,
'Could not load page with feedback'
)
feedback = self.db.feedback.find_one()
self.assertIn(
'data-feedback_id="%s"' % feedback.get('_id'),
response.data,
'Could not find feedback item'
)
def test_pf_feedback_view_user(self):
self.setup_useable_feedback()
with self.app as c:
with c.session_transaction() as sess:
self.setup_user_login(sess)
response = c.get('/feedback/')
assert response.status_code == 302, 'Invalid status code'
response = c.get('/feedback/', follow_redirects=True)
self.assertIn(
'You do not have the correct permissions to access this page',
response.data,
'Could not find error message on bad permissions'
)
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import range
import re
import warnings
import numpy as np
from skbio.util import cardinal_to_ordinal
_whitespace_regex = re.compile(r'\s')
_newline_regex = re.compile(r'\n')
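# _chunk_str below splits a string into fixed-size chunks joined by `char`;
# illustrative example: _chunk_str("ABCDEFG", 3, "\n") gives "ABC\nDEF\nG".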
def _chunk_str(s, n, char):
"""Insert `char` character every `n` characters in string `s`.
Canonically pronounced "chunkster".
"""
# Modified from http://stackoverflow.com/a/312464/3776794
if n < 1:
raise ValueError(
"Cannot split string into chunks with n=%d. n must be >= 1." % n)
return char.join((s[i:i+n] for i in range(0, len(s), n)))
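# _decode_qual_to_phred below converts an ASCII quality string into an array
# of Phred scores by subtracting the variant's offset; for example, with
# variant='sanger' (offset 33) the string 'II' decodes to scores [40, 40].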
def _decode_qual_to_phred(qual_str, variant=None, phred_offset=None):
phred_offset, phred_range = _get_phred_offset_and_range(
variant, phred_offset,
["Must provide either `variant` or `phred_offset` in order to decode "
"quality scores.",
"Decoding Solexa quality scores is not currently supported, "
"as quality scores are always stored as Phred scores in "
"scikit-bio. Please see the following scikit-bio issue to "
"track progress on this:\n\t"
"https://github.com/biocore/scikit-bio/issues/719"])
qual = np.fromstring(qual_str, dtype=np.uint8) - phred_offset
if np.any((qual > phred_range[1]) | (qual < phred_range[0])):
raise ValueError("Decoded Phred score is out of range [%d, %d]."
% (phred_range[0], phred_range[1]))
return qual
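# _encode_phred_to_qual below is the inverse operation: each Phred score is
# offset and turned back into an ASCII character, e.g. [40, 40] with
# variant='sanger' encodes to 'II'. Scores above the variant's range are
# clamped to the maximum with a warning; scores below it raise ValueError.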
def _encode_phred_to_qual(phred, variant=None, phred_offset=None):
phred_offset, phred_range = _get_phred_offset_and_range(
variant, phred_offset,
["Must provide either `variant` or `phred_offset` in order to encode "
"Phred scores.",
"Encoding Solexa quality scores is not currently supported. "
"Please see the following scikit-bio issue to track progress "
"on this:\n\t"
"https://github.com/biocore/scikit-bio/issues/719"])
qual_chars = []
for score in phred:
if score < phred_range[0]:
raise ValueError("Phred score %d is out of range [%d, %d]."
% (score, phred_range[0], phred_range[1]))
if score > phred_range[1]:
warnings.warn(
"Phred score %d is out of targeted range [%d, %d]. Converting "
"to %d." % (score, phred_range[0], phred_range[1],
phred_range[1]), UserWarning)
score = phred_range[1]
qual_chars.append(chr(score + phred_offset))
return ''.join(qual_chars)
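# Offsets and ranges resolved by _get_phred_offset_and_range below:
#   sanger       -> offset 33, range (0, 93)
#   illumina1.3  -> offset 64, range (0, 62)
#   illumina1.8  -> offset 33, range (0, 62)
#   solexa       -> not supported (raises NotImplementedError)
# With an explicit `phred_offset` (33-126) the range becomes (0, 126 - offset).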
def _get_phred_offset_and_range(variant, phred_offset, errors):
if variant is None and phred_offset is None:
raise ValueError(errors[0])
if variant is not None and phred_offset is not None:
raise ValueError(
"Cannot provide both `variant` and `phred_offset`.")
if variant is not None:
if variant == 'sanger':
phred_offset = 33
phred_range = (0, 93)
elif variant == 'illumina1.3':
phred_offset = 64
phred_range = (0, 62)
elif variant == 'illumina1.8':
phred_offset = 33
phred_range = (0, 62)
elif variant == 'solexa':
phred_offset = 64
phred_range = (-5, 62)
raise NotImplementedError(errors[1])
else:
raise ValueError("Unrecognized variant %r." % variant)
else:
if not (33 <= phred_offset <= 126):
raise ValueError(
"`phred_offset` %d is out of printable ASCII character range."
% phred_offset)
phred_range = (0, 126 - phred_offset)
return phred_offset, phred_range
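# _get_nth_sequence below pulls the seq_num-th record (1-based) out of a
# generator, closing the generator afterwards and raising ValueError if the
# file contains fewer sequences than requested.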
def _get_nth_sequence(generator, seq_num):
# i is set to None so that an empty generator will not result in an
# undefined variable when compared to seq_num.
i = None
if seq_num is None or seq_num < 1:
raise ValueError('Invalid sequence number (`seq_num`=%s). `seq_num`'
' must be between 1 and the number of sequences in'
' the file.' % str(seq_num))
try:
for i, seq in zip(range(1, seq_num + 1), generator):
pass
finally:
generator.close()
if i == seq_num:
return seq
raise ValueError('Reached end of file before finding the %s sequence.'
% cardinal_to_ordinal(seq_num))
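# _parse_fasta_like_header below splits a FASTA/FASTQ-style header line into
# (id, description); illustrative examples:
#   '>seq1 first sequence'  -> ('seq1', 'first sequence')
#   '> description only'    -> ('', 'description only')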
def _parse_fasta_like_header(line):
id_ = ''
desc = ''
header = line[1:].rstrip()
if header:
if header[0].isspace():
# no id
desc = header.lstrip()
else:
header_tokens = header.split(None, 1)
if len(header_tokens) == 1:
# no description
id_ = header_tokens[0]
else:
id_, desc = header_tokens
return id_, desc
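# _format_fasta_like_records below yields (header, sequence, quality) tuples,
# replacing whitespace in IDs and newlines in descriptions with the supplied
# replacement strings; it rejects empty sequences and, when require_qual is
# set, sequences without quality scores.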
def _format_fasta_like_records(generator, id_whitespace_replacement,
description_newline_replacement, require_qual):
if ((id_whitespace_replacement is not None and
'\n' in id_whitespace_replacement) or
(description_newline_replacement is not None and
'\n' in description_newline_replacement)):
raise ValueError(
"Newline character (\\n) cannot be used to replace whitespace in "
"sequence IDs, nor to replace newlines in sequence descriptions.")
for idx, seq in enumerate(generator):
if len(seq) < 1:
raise ValueError(
"%s sequence does not contain any characters (i.e., it is an "
"empty/blank sequence). Writing empty sequences is not "
"supported." % cardinal_to_ordinal(idx + 1))
id_ = seq.id
if id_whitespace_replacement is not None:
id_ = _whitespace_regex.sub(id_whitespace_replacement, id_)
desc = seq.description
if description_newline_replacement is not None:
desc = _newline_regex.sub(description_newline_replacement, desc)
if desc:
header = '%s %s' % (id_, desc)
else:
header = id_
if require_qual and seq.quality is None:
raise ValueError(
"Cannot write %s sequence because it does not have quality "
"scores associated with it." % cardinal_to_ordinal(idx + 1))
yield header, str(seq), seq.quality
def _line_generator(fh, skip_blanks=False):
for line in fh:
line = line.strip()
if line or not skip_blanks:
yield line
def _too_many_blanks(fh, max_blanks):
count = 0
too_many = False
for line in _line_generator(fh, skip_blanks=False):
if line:
break
else:
count += 1
if count > max_blanks:
too_many = True
break
fh.seek(0)
return too_many
|
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseNotFound, JsonResponse
from django.conf import settings
from django_countries import countries
from timeSeries.models import Series, Value, Forecast, SatelliteData, Location, DataProvider, DataType, User
from .decoder import decode
#===============================================================================
# from django.utils.safestring import mark_safe
#===============================================================================
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from celery import task, current_task
from celery.result import AsyncResult
from djcelery.models import PeriodicTask, IntervalSchedule
from gpu.manager import Manager, getResampledTime, getDateList, fGenerateLeads, fSecond, fMinute, fHour, fDay, fWeek, fMonth, fYear
from . import satelliteData #@UnusedImport
from django.core.exceptions import ObjectDoesNotExist
import binascii
import json
import warnings
import os
import datetime as dt
import dateutil.parser
import sys
import numpy as np
import matplotlib.pyplot as plt
import mpld3
from gpu.functions import plotQQ, ClickInfo
def viewSeries(request, seriesList):
context = {'LANG': request.LANGUAGE_CODE,
'LOCAL_JAVASCIPT': settings.LOCAL_JAVASCIPT,
}
seriesDict = {}
errorsDict = {'missing': list(),
'noAccess': list(),
'noData': list(),
}
seriesList=seriesList.split('/')
# find recent dates
recentDates = []
for seriesName in seriesList:
tmpResult = Series.objects.filter(name=seriesName)
if len(tmpResult)>0:
s0 = tmpResult[0]
recentDates.append(Value.objects.filter(series=s0.id).order_by('date').last().date)
recentDate = max(recentDates)
recentDate = recentDate.replace(year=recentDate.year-2)
# retrieve series' info
for seriesName in seriesList:
tmpResult = Series.objects.filter(name=seriesName)
if len(tmpResult)>0:
s0 = tmpResult[0]
result = Value.objects.filter(series=s0.id).order_by('date')
if tmpResult[0].encryptionKey==None:
values = [{'x':obj.date.isoformat(), 'y':str(obj.recordOpen)} for obj in result.filter(date__gte=recentDate)]
else:
values = [{'x':obj.date.isoformat(), 'y':binascii.b2a_base64(obj.record).decode("utf-8")} for obj in result.filter(date__gte=recentDate)]
forecasts = {}
for f0 in Forecast.objects.filter(targetSeries=s0.id).filter(ready=True):
forecasts[f0.name] = {}
forecasts[f0.name]['urlForecast'] = '/timeSeries' + reverse('forecast', 'timeSeries.urls', kwargs={'forecastName': f0.name})
forecasts[f0.name]['urlHindcast'] = '/timeSeries' + reverse('hindcast', 'timeSeries.urls', kwargs={'forecastName': f0.name})
forecasts[f0.name]['description'] = f0.description
forecasts[f0.name]['leadTime'] = f0.leadTime
forecasts[f0.name]['seasons'] = f0.splitBySeason
if s0.location.country in dict(countries):
tmpCountry = dict(countries)[s0.location.country]
else:
tmpCountry = ''
tmp = {'id': s0.id,
'name': s0.name,
'type': s0.type.name,
'provider': s0.provider.name,
'providerAbbreviation': s0.provider.abbreviation,
'providerIcon': '/' + str(s0.provider.icon),
'providerWebpage': s0.provider.website,
'units': s0.type.units,
'typeIcon': '/' + str(s0.type.icon),
'lat': float(s0.location.lat),
'lon': float(s0.location.lon),
'location': s0.location.name,
'quality': s0.quality,
'timeStepUnits': dict(Series.TIME_STEP_PERIOD_TYPE)[s0.timeStepUnits],
'timeStepPeriod': s0.timeStepPeriod,
'encryptionKey': s0.encryptionKey,
'metaEncrypted': s0.metaEncrypted,
'river': s0.location.river,
'country': tmpCountry,
'catchment': s0.location.catchment,
'values': values,
'records': len(result),
'forecasts': forecasts,
}
if len(result)==0:
errorsDict['noData'].append(seriesName)
tmp.update({'minDate': '',
'maxDate': ''})
else:
tmp.update({'minDate': result.first().date.isoformat(),
'maxDate': result.last().date.isoformat()})
seriesDict[str(s0)] = tmp
else:
errorsDict['missing'].append(seriesName)
context['series'] = json.dumps(seriesDict)
context['errors'] = json.dumps(errorsDict)
# fields
fields = (('Id', 'name'),
('Location', 'location'),
('River', 'river'),
('Catchment', 'catchment'),
('Type', 'type'),
('Units', 'units'),
('Time step', 'timeStepUnits'),
('Records', 'records'),
('From', 'minDate'),
('To', 'maxDate'),
)
context['fields'] = json.dumps(fields)
return render(request, 'timeSeries/viewSeries.html', context)
def deleteTimeSeries(request, seriesName):
series = Series.objects.filter(name=seriesName)
if len(series)==0:
context = {'message': ('error', 'The series ' + seriesName + ' has not been found in the database.')}
else:
tmp = Value.objects.filter(series=series).count()
if tmp>0:
Value.objects.filter(series=series).delete()
context = {'message': ('success', str(tmp) + ' records successfully deleted.')}
else:
context = {'message': ('warning', 'No data to be deleted.')}
return HttpResponse(
json.dumps(context),
content_type="application/json"
)
def upload(request, seriesName):
series = Series.objects.filter(name=seriesName)
if series:
provider = series[0].provider
location = series[0].location
seriesType = series[0].type
result = Value.objects.filter(series=series[0].id).order_by('date')
values = [{'x':obj.date.isoformat(), 'y':binascii.b2a_base64(obj.record).decode("utf-8")} for obj in result]
context = {'LANG': request.LANGUAGE_CODE,
'series': series[0].name,
'encryptionKey': series[0].encryptionKey,
'metaEncrypted': series[0].metaEncrypted,
'timeStep': dict(Series.TIME_STEP_PERIOD_TYPE)[series[0].timeStepUnits],
'timeStepPeriod': series[0].timeStepPeriod,
'provider': str(provider),
'type': str(seriesType),
'units': seriesType.units,
'location': str(location),
'data': json.dumps(values),
'LOCAL_JAVASCIPT': settings.LOCAL_JAVASCIPT,
}
return render(request, 'timeSeries/uploadValues.html', context)
else:
return HttpResponseNotFound('Series [' + seriesName + '] not found...')
def uploadTimeSeries(request, seriesName):
# TODO: check provider pass
# TODO: check duplicated values
data = json.loads(request.POST.get('toUpload'))
seriesObj = Series.objects.get(name=seriesName)
warnings.filterwarnings('ignore', '.*Invalid utf8 character string.*',)
toInput = list()
for i0, d0 in enumerate(data):
toInput.append(Value(series=seriesObj, date=d0['date'][:-1], record=binascii.a2b_base64(d0['value'])))
if i0 % 1000==0 and i0!=0:
Value.objects.bulk_create(toInput)
toInput = list()
Value.objects.bulk_create(toInput)
context = {'message': 'done!'}
return HttpResponse(
json.dumps(context),
content_type="application/json"
)
def seriesData(request):
context = {}
if request.method == 'POST':
seriesObj = Series.objects.get(name=request.POST.get('series'))
provider = seriesObj.provider
location = seriesObj.location
seriesType = seriesObj.type
context = {'location': str(location),
'provider': str(provider),
'type': str(seriesType),
'units': seriesType.units,
'timeStepUnits': dict(Series.TIME_STEP_PERIOD_CHOICES)[seriesObj.timeStepUnits],
'timeStepPeriod': seriesObj.timeStepPeriod,
'metaEncrypted': seriesObj.metaEncrypted,
'encryptionKey': seriesObj.encryptionKey,
'name': seriesObj.name,
}
return HttpResponse(
json.dumps(context),
content_type="application/json"
)
def getValues(request):
context = {}
if request.method == 'POST':
s0 = Series.objects.get(name=request.POST.get('series'))
dateFrom = dt.datetime.strptime(request.POST.get('from'), "%a, %d %b %Y %H:%M:%S %Z").replace(tzinfo = None)
dateTo = dt.datetime.strptime(request.POST.get('to'), "%a, %d %b %Y %H:%M:%S %Z").replace(tzinfo = None)
result = Value.objects.filter(series=s0, date__gte=dateFrom, date__lt=dateTo).order_by('date')
if not result:
tmp = Value.objects.filter(series=s0, date__lt=dateFrom)
if tmp:
tmp = tmp.latest('date')
reference = tmp.date
dateFrom = reference.replace(year=reference.year-1)
result = Value.objects.filter(series=s0).filter(date__gte=dateFrom, date__lt=dateTo).order_by('date')
if s0.encryptionKey==None:
values = [{'x':obj.date.isoformat(), 'y':str(obj.recordOpen)} for obj in result]
else:
values = [{'x':obj.date.isoformat(), 'y':binascii.b2a_base64(obj.record).decode("utf-8")} for obj in result]
context = {'values': values}
return JsonResponse(context)
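# fJumpDateFun below maps a period name ('seconds' ... 'years') to the
# corresponding date-stepping helper imported from gpu.manager (fSecond,
# fMinute, ...); judging by its use further down (e.g. fJumpFun(reference, -2)),
# these helpers shift a date by a signed number of periods.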
def fJumpDateFun (period):
tmp = {'seconds': fSecond,
'minutes': fMinute,
'hours': fHour,
'days': fDay,
'weeks': fWeek,
'months': fMonth,
'years': fYear,
}
return tmp[period]
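# fGetForecastData below returns a (target, extra) pair for a forecast:
# `target` is a (dates, values) tuple for the target series between fromDate
# and toDate (decrypted via decode() when an encryption key is set), and
# `extra` is a list of (dates, values) tuples for the extra series, resampled
# to the target time step when needed, or None when there are none.
# It returns False when the forecast name cannot be found.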
def fGetForecastData(forecastName, fromDate=None, toDate=None):
forecast = Forecast.objects.filter(name=forecastName)
timeStepUnits = forecast[0].targetSeries.TIME_STEP_DICT[forecast[0].targetSeries.timeStepUnits]
timeStepPeriod = forecast[0].targetSeries.timeStepPeriod
forecastTimeDelta = {timeStepUnits: timeStepPeriod}
if fromDate==None:
fromDate = Value.objects.filter(series=forecast[0].targetSeries.id).earliest('date').date
if toDate==None:
toDate = Value.objects.filter(series=forecast[0].targetSeries.id).latest('date').date
if forecast:
records = Value.objects.filter(series=forecast[0].targetSeries.id, date__gte=fromDate, date__lte=toDate).order_by('date')
if forecast[0].targetSeries.encryptionKey==None:
values = [float(r0.recordOpen) for r0 in records]
else:
values = decode([r0.record for r0 in records], forecast[0].targetSeries.encryptionKey)
dates = [str(r0.date) for r0 in records]
target = (dates, values)
extra = []
datesDesired = None
fromDateExtra = fromDate.replace(year=fromDate.year-1)
toDateExtra = toDate.replace(year=toDate.year+1)
for s0 in forecast[0].extraSeries.filter().order_by('id'):
timeStepUnits = s0.TIME_STEP_DICT[s0.timeStepUnits]
timeStepPeriod = s0.timeStepPeriod
extraTimeDelta = {timeStepUnits: timeStepPeriod}
if datesDesired==None and extraTimeDelta!=forecastTimeDelta:
datesDesired = getDateList(fromDate, toDate, **forecastTimeDelta)
records = Value.objects.filter(series=s0).filter(date__gte=fromDateExtra).filter(date__lte=toDateExtra).order_by('date')
if s0.encryptionKey==None:
values = [float(r0.recordOpen) for r0 in records]
else:
values = decode([r0.record for r0 in records], s0.encryptionKey)
dates = [r0.date for r0 in records]
if extraTimeDelta!=forecastTimeDelta:
datesExpected = getDateList(max(fromDateExtra, dates[0].replace(year=dates[0].year-1)), min(toDateExtra, dates[-1].replace(year=dates[-1].year+1)), **extraTimeDelta)
dates, values = getResampledTime(datesDesired, datesExpected, dates, values)
dates = [str(d0) for d0 in dates]
extra.append((dates, [str(v0) for v0 in values]))
if len(extra)==0:
extra=None
return (target, extra)
else:
return False
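# trainForecastBase below starts (or re-attaches to) a training job: when no
# job is recorded on the forecast it rewrites the placeholder forecast file,
# assembles the training settings into `info`, dispatches the Celery task
# trainWrapper.delay(...) and stores its job id; otherwise it reuses the
# stored job id. Either way it renders the trainProgress.html page.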
def trainForecastBase(request, forecastName):
forecasts = Forecast.objects.filter(name=forecastName)
if forecasts:
forecast = forecasts[0]
if forecast.jobId == '':
if forecast.forecastFile.name != '' and os.path.isfile(forecast.forecastFile.path):
os.remove(forecast.forecastFile.path)
forecast.forecastFile.save(forecastName + '.gpu', ContentFile('dummy content'))
info = {'leadTime': forecast.leadTime,
'seasons': forecast.splitBySeason,
'nodes': forecast.nodes,
'dataFunction': forecast.dataExpression,
'targetFunction': forecast.targetExpression,
'population': forecast.population,
'epochs': forecast.epochs,
'regularization': float(forecast.regularize),
'filePath': forecast.forecastFile.path,
'name': forecast.name,
'referenceDate': forecast.referenceDate.isoformat(),
'activationFunction': forecast.type,
'valFraction': 1-forecast.training/100,
'timeStepUnit': forecast.targetSeries.TIME_STEP_DICT[forecast.targetSeries.timeStepUnits],
'timeStepSize': forecast.targetSeries.timeStepPeriod,
'weigthRange': float(forecast.weigthRange),
'errorFunction': forecast.errorFunction,
'transformWeights': forecast.transformWeights,
'allowNegative': forecast.allowNegative,
'reduceTraining': float(forecast.reduceTraining),
'inertia': float(forecast.psoC0),
'c1': float(forecast.psoC1),
'c2': float(forecast.psoC2),
'c3': float(forecast.psoC3),
'forceNonExceedance': float(forecast.forceNonExceedance),
'trainingDates': forecast.trainingDates,
}
target, extra = fGetForecastData(forecastName)
#===================================================================
# jobId = 1
# forecasts.update(ready=False, jobId=jobId)
# trainWrapper(info, target, extra)
#===================================================================
jobId = trainWrapper.delay(info, target, extra).id
forecasts.update(ready=False, jobId=jobId)
else:
jobId=forecast.jobId
context = {'LANG': request.LANGUAGE_CODE,
'LOCAL_JAVASCIPT': settings.LOCAL_JAVASCIPT,
'name': forecast.name,
'jobId': jobId,
}
return render(request, 'timeSeries/trainProgress.html', context)
else:
return HttpResponseNotFound('Forecast [' + forecastName + '] not found...')
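# trainWrapper below is the Celery task (named 'train') that performs the
# actual training: it converts the date strings to numpy datetime64 values,
# builds a gpu.manager.Manager with the supplied settings, trains and saves it
# to info['filePath'], then marks the Forecast as ready; on failure the job id
# is cleared and the exception re-raised.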
@task(name='train')
def trainWrapper(info, data, extra):
try:
data = [np.array([np.datetime64(s0) for s0 in data[0]]), data[1]]
if extra!=None:
for i0 in range(len(extra)):
extra[i0] = [np.array([np.datetime64(s0) for s0 in extra[i0][0]]), extra[i0][1]]
man = Manager(data, extra=extra,
dataFunction=info['dataFunction'], targetFunction=info['targetFunction'],
nodes=info['nodes'], seasons=info['seasons'], population=info['population'],
epochs=info['epochs'], regularization=info['regularization'], refTime=dateutil.parser.parse(info['referenceDate']),
leads=fGenerateLeads(info['leadTime']), displayEach=25,
openClPlatform=settings.OPENCL_PLATFORM, openClDevice=settings.OPENCL_DEVICE,
activationFunction=info['activationFunction'], valFraction=info['valFraction'],
timeStepUnit=info['timeStepUnit'], timeStepSize=info['timeStepSize'], weigthRange=info['weigthRange'],
errorFunction=info['errorFunction'], transformWeights=info['transformWeights'],
allowNegative=info['allowNegative'],
reduceTraining=info['reduceTraining'], forecastName=info['name'],
inertia=info['inertia'], c1=info['c1'], c2=info['c2'], c3=info['c3'],
forceNonExceedance=info['forceNonExceedance'], trainingDates=info['trainingDates'],
)
man.train()
man.save(info['filePath'])
Forecast.objects.filter(name=info['name']).update(ready=True, jobId='')
return 'DONE'
except Exception as ex:
Forecast.objects.filter(name=info['name']).update(ready=False, jobId='')
raise(ex)
#===============================================================================
# def trainForecastRun(request, forecastName):
# forecast = Forecast.objects.filter(name=forecastName)
# if forecast:
# forecast.update(ready=False)
#
# if forecast[0].forecastFile.name != '' and os.path.isfile(forecast[0].forecastFile.path):
# os.remove(forecast[0].forecastFile.path)
# forecast[0].forecastFile.save(forecastName + '.gpu', ContentFile('dummy content'))
#
# info = {'leadTime': forecast[0].leadTime,
# 'seasons': forecast[0].splitBySeason,
# 'nodes': forecast[0].nodes,
# 'dataFunction': forecast[0].dataExpression,
# 'targetFunction': forecast[0].targetExpression,
# 'population': forecast[0].population,
# 'epochs': forecast[0].epochs,
# 'regularization': float(forecast[0].regularize),
# 'filePath': forecast[0].forecastFile.path,
# 'name': forecast[0].name,
# 'referenceDate': forecast[0].referenceDate.isoformat(),
# 'activationFunction': forecast[0].type,
# 'valFraction': 1-forecast[0].training/100,
# 'timeStepUnit': forecast[0].targetSeries.TIME_STEP_DICT[forecast[0].targetSeries.timeStepUnits],
# 'timeStepSize': forecast[0].targetSeries.timeStepPeriod,
# 'weigthRange': float(forecast[0].weigthRange),
# 'errorFunction': forecast[0].errorFunction,
# 'transformWeights': forecast[0].transformWeights,
# 'allowNegative': forecast[0].allowNegative,
# 'reduceTraining': float(forecast[0].reduceTraining),
# }
#
# target, extra = fGetForecastData(forecastName)
#
# #=======================================================================
# # trainWrapper(info, target, extra)
# # context = {'job': 1}
# #=======================================================================
#
# job = trainWrapper.delay(info, target, extra)
# context = {'job': job.id}
#
# return JsonResponse(context)
#===============================================================================
def trainForecastProgress(request, forecastName):
try:
forecast = Forecast.objects.get(name=forecastName)
jobId = forecast.jobId
plotCounter = request.POST.get('plotCounter')
if jobId=='':
jobId = request.POST.get('jobId')
if jobId =='':
jobId = None
if jobId==None:
context = {'state': 'UNKNOWN'}
else:
job = AsyncResult(jobId)
if job.state=='PENDING':
context = {'jobId': jobId,
'state': 'PENDING',
}
elif job.state=='SUCCESS' and job.result=='DONE':
context = {'jobId': jobId,
'state': 'DONE',
}
elif job.state=='PROGRESS':
context = job.result
if 'plot' in context.keys() and int(plotCounter)==context['plotCounter']:
context.pop('plot')
context.pop('plotCounter')
context['jobId'] = jobId
context['state'] = 'PROGRESS'
elif job.state=='FAILURE':
raise Exception
except Exception as ex:
print(str(ex))
context = {'message': ('error', str(ex))}
return JsonResponse(context)
def trainCancel(request, forecastName):
forecasts = Forecast.objects.filter(name=forecastName)
forecasts.update(ready=False, jobId='')
return JsonResponse({'success': 'SUCCESS'})
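# The forecast view below snaps the requested reference date to the nearest
# date that actually has a stored Value for the target series, loads the
# trained Manager from the forecast file and returns the selected forecast
# bands, simulations, dates and observed targets as JSON. Note that, unlike
# the other views in this module, it has no explicit not-found response when
# the forecast name does not exist.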
def forecast(request, forecastName):
errorMsg = ''
forecast = Forecast.objects.filter(name=forecastName)
if forecast:
tmp = dt.datetime.strptime(request.POST.get('reference'), "%a, %d %b %Y %H:%M:%S %Z").replace(tzinfo = None)
try:
larger = Value.objects.filter(series=forecast[0].targetSeries.id).filter(date__gte=tmp).earliest('date').date
except ObjectDoesNotExist:
larger = Value.objects.filter(series=forecast[0].targetSeries.id).latest('date').date
if larger!=tmp:
try:
smaller = Value.objects.filter(series=forecast[0].targetSeries.id).filter(date__lte=tmp).latest('date').date
except ObjectDoesNotExist:
smaller = Value.objects.filter(series=forecast[0].targetSeries.id).earliest('date').date
interval = (smaller, larger)
if np.argmin(np.abs(np.array(interval)-tmp))==0:
reference = smaller
else:
reference = larger
else:
reference = larger
fJumpFun = fJumpDateFun(forecast[0].period)
target, extra = fGetForecastData(forecastName, fJumpFun(reference,-2), reference)
target = [np.array([np.datetime64(s0) for s0 in target[0]]), target[1]]
if extra!=None:
for i0 in range(len(extra)):
extra[i0] = [np.array([np.datetime64(s0) for s0 in extra[i0][0]]), extra[i0][1]]
man = Manager(target, extra=extra, forecast=True)
man.load(forecast[0].forecastFile.path, openClPlatform=settings.OPENCL_PLATFORM, openClDevice=settings.OPENCL_DEVICE)
res = man.forecast(np.datetime64(reference), data=target, extra=extra)
selectBands = (1,3,5,7,8,10,12,14)
res['bands'] = res['bands'][selectBands,]
res['simulations'] = res['simulations'][:,selectBands]
if sys.platform=='linux':
tmpDates = [str(d0)[0:-5] for d0 in res['dates']]
else:
tmpDates = [str(d0) for d0 in res['dates']]
context = {'bands': res['bands'].tolist(),
'dates': tmpDates,
'values': np.transpose(res['simulations']).tolist(),
'targets': [None if np.isnan(t0) else t0 for t0 in res['targets'].tolist()],
'timeStepUnits': dict(Series.TIME_STEP_PERIOD_TYPE)[forecast[0].targetSeries.timeStepUnits],
'timeStepPeriod': forecast[0].targetSeries.timeStepPeriod,
'error': errorMsg,
}
return JsonResponse(context)
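# getTrainingPeriods below rebuilds a Manager with the forecast's settings,
# groups the data the same way the training split does, and renders an mpld3
# violin plot (returned as JSON under 'plot') in which training groups are
# drawn in blue (#417690) and validation groups in near-white (#f8f8f8).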
def getTrainingPeriods(request, forecastName):
context = {}
forecasts = Forecast.objects.filter(name=forecastName)
if forecasts:
forecast = forecasts[0]
trainingDates = forecast.trainingDates
data, extra = fGetForecastData(forecastName)
data = [np.array([np.datetime64(s0) for s0 in data[0]]), data[1]]
if extra!=None:
for i0 in range(len(extra)):
extra[i0] = [np.array([np.datetime64(s0) for s0 in extra[i0][0]]), extra[i0][1]]
man = Manager(data, extra=extra,
dataFunction=forecast.dataExpression, targetFunction=forecast.targetExpression,
nodes=forecast.nodes, seasons=forecast.splitBySeason, population=forecast.population,
epochs=forecast.epochs, regularization=forecast.regularize, refTime=forecast.referenceDate,
leads=fGenerateLeads(forecast.leadTime), displayEach=25,
openClPlatform=settings.OPENCL_PLATFORM, openClDevice=settings.OPENCL_DEVICE,
activationFunction=forecast.type, valFraction=1-forecast.training/100,
timeStepUnit=forecast.targetSeries.TIME_STEP_DICT[forecast.targetSeries.timeStepUnits],
timeStepSize=forecast.targetSeries.timeStepPeriod, weigthRange=float(forecast.weigthRange),
errorFunction=forecast.errorFunction, transformWeights=forecast.transformWeights,
allowNegative=forecast.allowNegative, reduceTraining=forecast.reduceTraining, forecastName=forecast.name,
inertia=forecast.psoC0, c1=forecast.psoC1, c2=forecast.psoC2, c3=forecast.psoC3,
forceNonExceedance=forecast.forceNonExceedance, trainingDates=trainingDates,
)
groups = man.data.splitGroup
dateGroups = []
dataGroups =[]
dataPos = []
groupNumbers = np.unique(groups)
validGroups = []
for g0 in groupNumbers:
idxs = groups==g0
values = man.data.values[idxs]
tmp = np.logical_not(np.isnan(values))
if np.sum(tmp)>0:
dataGroups.append(values[tmp])
dateGroups.append(man.data.dates[idxs][tmp])
dataPos.append(g0)
validGroups.append(g0)
dataPos = np.array(dataPos)
tmp = [d0[-1]-d0[0] for d0 in dateGroups]
widths = []
for w0 in tmp:
widths.append(w0/max(tmp))
fig = plt.figure(figsize=(12, 3))
plotAx = fig.add_subplot(1, 1, 1)
violinParts = plotAx.violinplot(dataGroups, dataPos, points=40, widths=widths, showmeans=False, showextrema=False, showmedians=False)
training = []
for i0, g0 in enumerate(validGroups):
if g0 in man.data.splitTra:
training.append(True)
else:
training.append(False)
#===============================================================
# val[2].append(dateGroups[i0][0].astype(dt.datetime).strftime('%Y.%m.%d %H:%M:%S') + '-' + dateGroups[i0][-1].astype(dt.datetime).strftime('%Y.%m.%d %H:%M:%S'))
#===============================================================
#=======================================================================
# pointsVal = plotAx.scatter(val[0], val[1], s=40, alpha=0.8, color='#f5dd5d', label='Validation')
# pointsTra = plotAx.scatter(tra[0], tra[1], s=80, alpha=0.8, color='#be7429', label='Training')
#=======================================================================
# TODO: Change so that it works also with sub-daily
# FIX: Correct the split into periods
plotAx.get_xaxis().set_ticks([])
plotAx.set_xlim([dataPos[0]-1,dataPos[-1]+1])
for i0, p0 in enumerate(violinParts['bodies']):
if training[i0]:
p0.set_facecolor('#417690')
else:
p0.set_facecolor('#f8f8f8')
            p0.set_alpha(1)
styles = {True: {'fill': '#417690'},
False: {'fill': '#f8f8f8'}}
for i0, p0 in enumerate(violinParts['bodies']):
#=======================================================================
# tmp = dateGroups[i0][0].astype(dt.datetime).strftime('%Y.%m.%d %H:%M:%S') + ' - ' + dateGroups[i0][-1].astype(dt.datetime).strftime('%Y.%m.%d %H:%M:%S')
#=======================================================================
marker = dateGroups[i0][0].astype(dt.datetime).strftime('%Y.%m.%d %H:%M:%S')
mpld3.plugins.connect(fig, ClickInfo(p0, training[i0], styles, marker))
#===========================================================================
# mpld3.plugins.connect(fig, ClickInfo(pointsVal))
# mpld3.plugins.connect(fig, ClickInfo(pointsTra))
# mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(pointsVal, val[2]))
# mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(pointsTra, tra[2]))
#===========================================================================
plotDict = mpld3.fig_to_dict(fig)
plt.close(fig)
return JsonResponse({'plot': plotDict})
#===========================================================================
# return HttpResponse(mpld3.fig_to_html(fig))
#===========================================================================
#===========================================================================
# return JsonResponse(context)
#===========================================================================
def hindcast(request, forecastName):
forecast = Forecast.objects.filter(name=forecastName)
if forecast:
        if request.POST['lead']=='null':
lead = forecast[0].leadTime
else:
lead = float(request.POST['lead'])
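        # The 'from'/'to' POST fields are parsed with "%a, %d %b %Y %H:%M:%S %Z", i.e. the
        # UTC string format produced by JavaScript's Date.toUTCString() (assumption about
        # the client); the timezone information is dropped right after parsing.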
dateFrom = dt.datetime.strptime(request.POST.get('from'), "%a, %d %b %Y %H:%M:%S %Z").replace(tzinfo = None)
dateTo = dt.datetime.strptime(request.POST.get('to'), "%a, %d %b %Y %H:%M:%S %Z").replace(tzinfo = None)
fJumpFun = fJumpDateFun(forecast[0].period)
dateFromExtended = fJumpFun(dateFrom, -2)
target, extra = fGetForecastData(forecastName, fromDate=dateFromExtended, toDate=dateTo)
target = [np.array([np.datetime64(s0) for s0 in target[0]]), target[1]]
if extra!=None:
for i0 in range(len(extra)):
extra[i0] = [np.array([np.datetime64(s0) for s0 in extra[i0][0]]), extra[i0][1]]
man = Manager(target, extra=extra, forecast=True)
man.load(forecast[0].forecastFile.path, openClPlatform=settings.OPENCL_PLATFORM, openClDevice=settings.OPENCL_DEVICE)
try:
res = man.hindcast(data=target, lead=lead, extra=extra, dateFrom=dateFrom)
#===================================================================
# toKeep = res['dates']>=dateFrom
# res['dates'] = res['dates'][toKeep]
# res['simulations'] = res['simulations'][toKeep,]
# res['targets'] = res['targets'][toKeep]
#===================================================================
jsonQQ = plotQQ(res['traQQ'], res['valQQ'], res['bands'])
selectBands = (1,3,5,7,8,10,12,14)
res['bands'] = res['bands'][selectBands,]
res['simulations'] = res['simulations'][:,selectBands]
#===================================================================
# {'dates': dates, 'simulations': leadSimulations, 'bands': bands, 'targets': leadTargets,
# 'traDates': traDates, 'traPerformance': traPerformance, 'valPerformance': valPerformance,
# 'traQQ': traQQ, 'valQQ': valQQ}
#===================================================================
#===================================================================
# trainingDates = man.data.dates[man.data.idxTra]
# trainingDates = trainingDates[np.logical_and(trainingDates>=np.datetime64(dateFrom), trainingDates<=np.datetime64(dateTo))]
#===================================================================
if sys.platform=='linux':
tmpDates = [str(d0)[0:-5] for d0 in res['dates']]
tmpTrainingDates = [str(d0)[0:-5] for d0 in res['traDates']]
else:
tmpDates = [str(d0) for d0 in res['dates']]
tmpTrainingDates = [str(d0) for d0 in res['traDates']]
context = {'bands': res['bands'].tolist(),
'dates': tmpDates,
'values': np.transpose(res['simulations']).tolist(),
'targets': [None if np.isnan(t0) else t0 for t0 in res['targets'].tolist()],
'timeStepUnits': dict(Series.TIME_STEP_PERIOD_TYPE)[forecast[0].targetSeries.timeStepUnits],
'timeStepPeriod': forecast[0].targetSeries.timeStepPeriod,
'trainingDates': tmpTrainingDates,
'QQ': jsonQQ,
'traPerformance': '<p>Training : α:%f, ξ:%f, π:%f</p>' % res['traPerformance'],
'valPerformance': '<p>Validation: α:%f, ξ:%f, π:%f</p>' % res['valPerformance'],
}
return JsonResponse(context)
except Exception as ex:
return JsonResponse({'status':'false','message': str(ex)}, status=500)
@task(name='storeSatelliteData')
def storeSatelliteDataWrapper(name):
try:
current_task.update_state(state='PROGRESS', meta={'message': ('warning', 'Starting...'),
'progress': 0,
'state': 'PROGRESS'})
except Exception:
pass
satelliteObj = SatelliteData.objects.get(name=name)
satellite = satelliteObj.satellite
dateIni = satelliteObj.startDate
dateEnd = dt.datetime.now()
geometryFile = satelliteObj.geometry.path
dataFolder = satelliteObj.dataFolder #@UnusedVariable
downloadFolder = os.path.join(settings.SATELLITE_DOWNLOAD, satellite) #@UnusedVariable
jsonGeometry = satelliteObj.jsonGeometry
username = satelliteObj.username #@UnusedVariable
password = satelliteObj.password #@UnusedVariable
downloadThreads = satelliteObj.downloadThreads
readThreads = satelliteObj.readThreads
satelliteInstance = eval('satelliteData.' + satellite + '(dataFolder=dataFolder, downloadFolder=downloadFolder, username=username, password=password)')
satelliteObj.lastRecord = satelliteInstance.store(dateIni=dateIni, dateEnd=dateEnd, geometryFile=geometryFile, geometryStr=jsonGeometry, downloadThreads=downloadThreads, readThreads=readThreads)
# Handle geometry
if not satelliteObj.readyGeometry or satelliteObj.jsonGeometry=='':
satelliteObj.jsonGeometry = satelliteInstance.getGeometryInfo()
satelliteObj.readyGeometry = True
user = User.objects.get(username='tethys')
# Introduce associated location
locations = Location.objects.filter(name='sat_' + name)
if locations:
location = locations[0]
else:
tmp = json.loads(satelliteObj.jsonGeometry)
location = Location(name='sat_' + name,
lat=np.mean(np.array(tmp['lat'])[tmp['idxReduced'][0]]),
lon=np.mean(np.array(tmp['lon'])[tmp['idxReduced'][1]]),
observations='Generated automatically from satellite data aggregation.',
introducedBy=user,
)
location.save()
satelliteObj.location = location
# Introduce associated series
serieS = Series.objects.filter(name='sat_' + name)
if serieS:
series = serieS[0]
else:
timestepDict = {'minutes': 'm',
'hours': 'h',
'days': 'd',
'weeks': 'w',
'months': 'M',
'years': 'Y'}
timeStepUnits=timestepDict[list(satelliteInstance.timestep.keys())[0]]
timeStepPeriod=satelliteInstance.timestep[list(satelliteInstance.timestep.keys())[0]]
series = Series(name='sat_' + name,
location=satelliteObj.location,
provider=DataProvider.objects.get(name='tethys'),
type=DataType.objects.get(name='Satellite data aggregation'),
timeStepUnits=timeStepUnits,
timeStepPeriod=timeStepPeriod,
encryptionKey=None,
observations='Generated automatically from satellite data aggregation.',
introducedBy=user,
)
series.save()
satelliteObj.series = series
# Erase and re-introduce associated values
Value.objects.filter(series=series).delete()
print('Values deleted')
aggregateSatelliteData(satelliteObj, satelliteInstance)
print('Values inserted')
# Update satellite entry
satelliteObj.jobId = None
satelliteObj.save()
return 'DONE'
def storeSatelliteDataProgress(request, name):
try:
satelliteObj = SatelliteData.objects.get(name=name)
jobId = satelliteObj.jobId
if jobId==None:
jobId = request.POST.get('jobId')
if jobId =='':
jobId = None
if jobId==None:
context = {'state': 'UNKNOWN'}
else:
job = AsyncResult(jobId)
data = job.result or job.state
if data=='PENDING':
context = {'jobId': jobId,
'state': 'PENDING',
'message': ('warning', 'Please be patient. Waiting on other processes in queue...')}
elif data=='DONE' or ('state' in data.keys() and data['state']=='DONE'):
context = {'jobId': jobId,
'state': 'DONE',
'message': ('success', 'The process is complete.')}
else:
context = data
context['jobId'] = jobId
except Exception as ex:
print(str(ex))
context = {'message': ('error', str(ex))}
return JsonResponse(context)
def storeSatelliteData(request, name):
# reviews all the history of the satellite product
satelliteObj = SatelliteData.objects.filter(name=name)
if not satelliteObj:
context = {'message': ('error', 'The satellite data "' + name + '" has not been found in the database.')}
else:
job = storeSatelliteDataWrapper.delay(name)
satelliteObj[0].jobId = job.id
satelliteObj[0].save()
#=======================================================================
# storeSatelliteDataWrapper(name)
# satelliteObj = SatelliteData.objects.filter(name=name)
# satelliteObj[0].jobId = None
#=======================================================================
context = {'jobId': satelliteObj[0].jobId,
'message': ('warning', 'Starting data preparation...'),
'state': 'PROGRESS'}
# Add celery periodic task
intervalSchedules = IntervalSchedule.objects.filter(period='hours', every='2')
if intervalSchedules:
intervalSchedule = intervalSchedules[0]
else:
intervalSchedule = IntervalSchedule(period='hours', every='2')
intervalSchedule.save()
periodicTasks = PeriodicTask.objects.filter(name=name + ' Update')
if not periodicTasks:
periodicTask = PeriodicTask(name=name + ' Update', task='updateSatelliteData', interval=intervalSchedule, args='["' + name + '"]')
periodicTask.save()
return JsonResponse(context)
@task(name='updateSatelliteData')
def updateSatelliteDataWrapper(name):
try:
current_task.update_state(state='PROGRESS', meta={'message': ('warning', 'Starting...'),
'progress': 0,
'state': 'PROGRESS'})
except Exception:
pass
satelliteObj = SatelliteData.objects.get(name=name)
satellite = satelliteObj.satellite
geometryFile = satelliteObj.geometry.path
dataFolder = satelliteObj.dataFolder #@UnusedVariable
downloadFolder = os.path.join(settings.SATELLITE_DOWNLOAD, satellite) #@UnusedVariable
jsonGeometry = satelliteObj.jsonGeometry
username = satelliteObj.username #@UnusedVariable
password = satelliteObj.password #@UnusedVariable
downloadThreads = satelliteObj.downloadThreads
readThreads = satelliteObj.readThreads
satelliteInstance = eval('satelliteData.' + satellite + '(dataFolder=dataFolder, downloadFolder=downloadFolder, username=username, password=password)')
satelliteObj.lastRecord = satelliteInstance.update(geometryFile=geometryFile, geometryStr=jsonGeometry, downloadThreads=downloadThreads, readThreads=readThreads)
# Introduce new values
aggregateSatelliteData(satelliteObj, satelliteInstance)
# Update satellite entry
satelliteObj.jobId = None
satelliteObj.save()
return 'DONE'
def updateSatelliteData(request, name):
# only looks for recent data
satelliteObj = SatelliteData.objects.filter(name=name)
if len(satelliteObj)==0:
context = {'message': ('error', 'The satellite data "' + name + '" has not been found in the database.')}
else:
        job = updateSatelliteDataWrapper.delay(name)
satelliteObj[0].jobId = job.id
satelliteObj[0].save()
#=======================================================================
# storeSatelliteDataWrapper(name)
# satelliteObj = SatelliteData.objects.filter(name=name)
# satelliteObj[0].jobId = None
#=======================================================================
satelliteObj[0].save()
context = {'jobId': satelliteObj[0].jobId,
'message': ('warning', 'Starting data update...'),
'state': 'PROGRESS'}
return JsonResponse(context)
def getSatelliteData(request):
# Get satellite data for display from the database
data = json.loads(request.POST.get('data'))
name = data['name']
info = data['info']
datetimes = data['datetimes']
satelliteObj = SatelliteData.objects.get(name=name)
satellite = satelliteObj.satellite
dataFolder = satelliteObj.dataFolder #@UnusedVariable
downloadFolder = os.path.join(settings.SATELLITE_DOWNLOAD, satellite) #@UnusedVariable
satelliteInstance = eval('satelliteData.' + satellite + '(dataFolder=dataFolder, downloadFolder=downloadFolder)')
if info:
data = satelliteInstance.getDataForJSON(dateIni=dateutil.parser.parse(datetimes[0]), dateEnd=dateutil.parser.parse(datetimes[-1]))
else:
data = satelliteInstance.getDataForJSON(dateIni=dateutil.parser.parse(datetimes[0]), dateEnd=dateutil.parser.parse(datetimes[-1]), returnInfo=False)
data['name'] = name
data['dates'] = [s0 + '.000Z' for s0 in data['dates']]
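    # Iterate backwards so entries can be popped in place without disturbing the
    # indices of the items still to be checked.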
for i0 in range(len(data['dates'])-1,-1,-1):
if data['dates'][i0] not in datetimes:
data['data'].pop(i0)
data['dates'].pop(i0)
return HttpResponse(
json.dumps(json.dumps(data)),
content_type="application/json"
)
def aggregateSatelliteData(satelliteObj, satelliteInstance):
series = Series.objects.get(name='sat_' + satelliteObj.name)
# Get the list of already aggregated records
storedDates = [v0.date for v0 in Value.objects.filter(series=series).order_by('date')]
# Get the list of all possible records
possibleDates = satelliteInstance.filePeriod(dateIni=satelliteObj.startDate, dateEnd=satelliteObj.lastRecord)
# Create missing list
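    # Missing dates are the set difference possibleDates \ storedDates: build a boolean
    # mask over all possible dates and clear the positions that ismember() reports as
    # already stored in the database.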
tmp0 = np.array(satelliteInstance.ismember(storedDates, possibleDates))
    tmp1 = np.ones(len(possibleDates), dtype=bool)
if tmp0.size>0:
tmp1[tmp0] = False
tmp0 = np.array(possibleDates)
missingDates = tmp0[tmp1].tolist()
# retrieve values
records = satelliteInstance.aggregate(missingDates, geometryStr=satelliteObj.jsonGeometry)
# store values
toInput = []
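    # Values are written in batches: every 1000 records the accumulated objects are
    # flushed with bulk_create and the task progress is updated.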
    tmpPeriods = len(records)
for i0, d0 in enumerate(records):
if not np.isnan(d0):
toInput.append(Value(series=series, date=missingDates[i0], recordOpen=d0))
if i0 % 1000==0 and i0!=0:
try:
current_task.update_state(state='PROGRESS', meta={'message': ('warning', 'Saving to the database'),
'progress': i0/tmpPeriods,
'state': 'PROGRESS'})
except Exception:
pass
Value.objects.bulk_create(toInput)
toInput = []
Value.objects.bulk_create(toInput)
|
|
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska
This file holds the standards for every algorithm.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from spotpy import database
from spotpy import parameter
import numpy as np
import time
import threading
try:
from queue import Queue
except ImportError:
    # If the running Python version is 2.* only the multiprocessing Queue class is available.
    # In that case the main process has to sleep for one microsecond, otherwise the subprocess
    # is not finished and the main process cannot access it and garbage-collect it.
    # This slows down the whole simulation and is an annoying bug; Python 3.x does not need
    # this workaround.
from Queue import Queue
class _RunStatistic(object):
"""
    This class checks for each run whether the objective function improved and holds the
    best parameter set.
    Every _algorithm has an object of this class as status.
    Usage:
    status = _RunStatistic(repetitions, algorithm_name, optimization_direction, parnames)
    status(objectivefunction, params)
"""
def __init__(self, repetitions, algorithm_name, optimization_direction, parnames):
        self.optimization_direction = optimization_direction # grid, maximize, minimize
print('Initializing the ',algorithm_name,' with ',repetitions,' repetitions')
if optimization_direction == 'minimize':
self.compare = self.minimizer
print('The objective function will be minimized')
if optimization_direction == 'maximize':
self.compare = self.maximizer
            print('The objective function will be maximized')
if optimization_direction == 'grid':
self.compare = self.grid
self.rep = 0
self.parnames = parnames
self.parameters= len(parnames)
self.params_min = [np.nan]*self.parameters
self.params_max = [np.nan]*self.parameters
self.objectivefunction_min = 1e308
self.objectivefunction_max = -1e308
self.starttime = time.time()
self.last_print = time.time()
self.repetitions = repetitions
self.stop = False
def minimizer(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
def maximizer(self, objval, params):
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def grid(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def __call__(self, objectivefunction, params, block_print=False):
self.rep+=1
if type(objectivefunction) == type([]): #TODO: change to iterable
self.compare(objectivefunction[0], params)
elif type(objectivefunction) == type(np.array([])):
pass
else:
self.compare(objectivefunction, params)
if self.rep == self.repetitions:
self.stop = True
if not block_print:
self.print_status()
def print_status(self):
        # Build a string showing the approximate time left to the end of the simulation in H:M:S
        acttime = time.time()
        # Refresh the progressbar every two seconds
if acttime - self.last_print >= 2:
avg_time_per_run = (acttime - self.starttime) / (self.rep + 1)
timestr = time.strftime("%H:%M:%S", time.gmtime(round(avg_time_per_run * (self.repetitions - (self.rep + 1)))))
if self.optimization_direction == 'minimize':
text = '%i of %i, minimal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, timestr)
if self.optimization_direction == 'maximize':
text = '%i of %i, maximal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_max, timestr)
if self.optimization_direction == 'grid':
text = '%i of %i, min objf=%g, max objf=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, self.objectivefunction_max, timestr)
print(text)
self.last_print = time.time()
def print_status_final(self):
print('\n*** Final SPOTPY summary ***')
print('Total Duration: ' + str(round((time.time() - self.starttime), 2)) + ' seconds')
print('Total Repetitions:', self.rep)
if self.optimization_direction == 'minimize':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
if self.optimization_direction == 'maximize':
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
if self.optimization_direction == 'grid':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
print('******************************\n')
def __repr__(self):
return 'Min objectivefunction: %g \n Max objectivefunction: %g' % (
self.objectivefunction_min, self.objectivefunction_max)
class _algorithm(object):
"""
Implements an algorithm.
Input
----------
spot_setup: class
model: function
        Should be callable with a parameter combination of the parameter-function
        and return a list of simulation results (as long as the evaluation list)
parameter: function
        When called, it should return a random parameter combination, which can
        be e.g. uniform or Gaussian
objectivefunction: function
Should return the objectivefunction for a given list of a model simulation and
observation.
evaluation: function
        Should return the true values as returned by the model.
dbname: str
Name of the database where parameter, objectivefunction value and simulation
results will be saved.
dbformat: str
        ram: fast, suited for short sampling times. No file will be created and results are saved in an array.
csv: A csv file will be created, which you can import afterwards.
parallel: str
        seq: Sequential sampling (default): Normal iterations on one core of your cpu.
mpc: Multi processing: Iterations on all available cores on your (single) pc
        mpi: Message Passing Interface: Parallel computing on high performance computing clusters, mpi4py needs to be installed
save_threshold: float or list
Compares the given value/list of values with return value/list of values from spot_setup.objectivefunction.
If the objectivefunction value is higher, the results are saved in the database. If not they are ignored (saves storage).
db_precision:np.float type
set np.float16, np.float32 or np.float64 for rounding of floats in the output database
Default is np.float16
sim_timeout: float, int or None, default: None
the defined model given in the spot_setup class can be controlled to break after 'sim_timeout' seconds if
sim_timeout is not None.
        If the model run has been broken, simply '[nan]' will be returned.
random_state: int or None, default: None
        the algorithm uses the number in random_state as a seed for numpy. This way stochastic processes can be reproduced.
"""
_unaccepted_parameter_types = (parameter.List, )
def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True,
dbappend=False, parallel='seq', save_sim=True, breakpoint=None,
backup_every_rep=100, save_threshold=-np.inf, db_precision=np.float16,
sim_timeout=None, random_state=None, optimization_direction='grid', algorithm_name=''):
# Initialize the user defined setup class
self.setup = spot_setup
param_info = parameter.get_parameters_array(self.setup, unaccepted_parameter_types=self._unaccepted_parameter_types)
self.all_params = param_info['random']
self.constant_positions = parameter.get_constant_indices(spot_setup)
if self.constant_positions:
self.non_constant_positions = []
for i, val in enumerate(self.all_params):
if self.all_params[i] not in self.constant_positions:
self.non_constant_positions.append(i)
else:
self.non_constant_positions = np.arange(0,len(self.all_params))
self.parameter = self.get_parameters
self.parnames = param_info['name']
self.algorithm_name = algorithm_name
# Create a type to hold the parameter values using a namedtuple
self.partype = parameter.ParameterSet(param_info)
self.evaluation = self.setup.evaluation()
self.save_sim = save_sim
self.optimization_direction = optimization_direction
self.dbname = dbname or 'customDb'
self.dbformat = dbformat or 'ram'
self.db_precision = db_precision
self.breakpoint = breakpoint
self.backup_every_rep = backup_every_rep
# Two parameters to control the data base handling
# 'dbinit' triggers the initial creation of the data base file
# 'dbappend' used to append to the existing data base, after restart
self.dbinit = dbinit
self.dbappend = dbappend
# Set the random state
        if random_state is None: #ToDo: Have to discuss if these 3 lines are necessary.
random_state = np.random.randint(low=0, high=2**30)
np.random.seed(random_state)
        # If the value is not None, a timeout is set so that the simulation breaks after sim_timeout seconds without returning a value
self.sim_timeout = sim_timeout
self.save_threshold = save_threshold
if breakpoint == 'read' or breakpoint == 'readandwrite':
print('Reading backupfile')
try:
open(self.dbname+'.break')
except FileNotFoundError:
print('Backupfile not found')
self.dbappend = True
# Now a repeater (ForEach-object) is loaded
        # A repeater is a convenient wrapper to repeat tasks
# We have the same interface for sequential and for parallel tasks
if parallel == 'seq':
from spotpy.parallel.sequential import ForEach
elif parallel == 'mpi':
from spotpy.parallel.mpi import ForEach
        # MPC is based on pathos multiprocessing and uses an ordered map, so results are given back in the same
        # order as the parameters
elif parallel == 'mpc':
from spotpy.parallel.mproc import ForEach
        # UMPC is based on pathos multiprocessing and uses an unordered map, so results are given back in the order
        # in which the subprocesses finish. This may speed up the whole simulation but is not recommended if the
        # objective function depends on the order of the data, because the order of the results is effectively
        # randomized
elif parallel == 'umpc':
from spotpy.parallel.umproc import ForEach
else:
raise ValueError(
"'%s' is not a valid keyword for parallel processing" % parallel)
# This is the repeater for the model runs. The simulate method does the work
# If you need different tasks, the repeater can be pushed into a "phase" using the
# setphase function. The simulate method can check the current phase and dispatch work
# to other functions. This is introduced for sceua to differentiate between burn in and
# the normal work on the chains
self.repeat = ForEach(self.simulate)
# method "save" needs to know whether objective function result is list or float, default is float
self.like_struct_typ = type(1.1)
def __str__(self):
return '{type}({mtype}())->{dbname}'.format(
type=type(self).__name__,
mtype=type(self.setup).__name__,
dbname=self.dbname)
def __repr__(self):
return '{type}()'.format(type=type(self).__name__)
def get_parameters(self):
"""
Returns the parameter array from the setup
"""
pars = parameter.get_parameters_array(self.setup)
return pars[self.non_constant_positions]
def set_repetiton(self, repetitions):
self.status = _RunStatistic(repetitions, self.algorithm_name,
self.optimization_direction, self.parnames)
# In MPI, this command will do nothing on the master process
# but the worker processes are going to wait for jobs.
# Hence the workers will only receive parameters for the
# simulate function, new calculation phases and the termination
self.repeat.start()
def final_call(self):
self.repeat.terminate()
try:
self.datawriter.finalize()
except AttributeError: # Happens if no database was assigned
pass
self.status.print_status_final()
def _init_database(self, like, randompar, simulations):
if self.dbinit:
print('Initialize database...')
self.datawriter = database.get_datawriter(self.dbformat,
self.dbname, self.parnames, like, randompar, simulations,
save_sim=self.save_sim, dbappend=self.dbappend,
dbinit=self.dbinit, db_precision=self.db_precision,
setup=self.setup)
self.dbinit = False
def __is_list_type(self, data):
if type(data) == type:
return data == list or data == type(np.array([]))
else:
return type(data) == list or type(data) == type(np.array([]))
def save(self, like, randompar, simulations, chains=1):
# Initialize the database if no run was performed so far
self._init_database(like, randompar, simulations)
# Test if like and the save threshold are float/list and compare accordingly
if self.__is_list_type(like) and self.__is_list_type(self.save_threshold):
if all(i > j for i, j in zip(like, self.save_threshold)): #Compares list/list
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and (not self.__is_list_type(self.save_threshold)):
if like>self.save_threshold: #Compares float/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if self.__is_list_type(like) and (not self.__is_list_type(self.save_threshold)):
if like[0]>self.save_threshold: #Compares list/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and self.__is_list_type(self.save_threshold): #Compares float/list
            if (like > self.save_threshold).all():
self.datawriter.save(like, randompar, simulations, chains=chains)
def read_breakdata(self, dbname):
''' Read data from a pickle file if a breakpoint is set.
Reason: In case of incomplete optimizations, old data can be restored. '''
import pickle
with open(dbname+'.break', 'rb') as breakfile:
work,backuptime,repos,obmin,obmax=pickle.load(breakfile)
self.status.starttime=self.status.starttime-backuptime
self.status.rep=repos
self.status.objectivefunction_min=obmin
self.status.objectivefunction_max=obmax
return work
def write_breakdata(self, dbname, work):
''' Write data to a pickle file if a breakpoint has been set.'''
import pickle
work=(work,self.status.last_print-self.status.starttime,self.status.rep,self.status.objectivefunction_min,self.status.objectivefunction_max)
with open(str(dbname)+'.break', 'wb') as breakfile:
pickle.dump(work, breakfile)
def getdata(self):
return self.datawriter.getdata()
def update_params(self, params):
#Add potential Constant parameters
self.all_params[self.non_constant_positions] = params
return self.all_params
    def postprocessing(self, rep, params, simulation, chains=1, save_run=True, negativlike=False, block_print=False): # TODO: rep not necessary
params = self.update_params(params)
if negativlike is True:
like = -self.getfitness(simulation=simulation, params=params)
else:
like = self.getfitness(simulation=simulation, params=params)
# Save everything in the database, if save is True
# This is needed as some algorithms just want to know the fitness,
# before they actually save the run in a database (e.g. sce-ua)
self.status(like,params,block_print=block_print)
if save_run is True and simulation is not None:
self.save(like, params, simulations=simulation, chains=chains)
if type(like)==type([]):
return like[0]
else:
return like
def getfitness(self, simulation, params):
"""
Calls the user defined spot_setup objectivefunction
"""
try:
#print('Using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation, params = (params,self.parnames))
except TypeError: # Happens if the user does not allow to pass parameter in the spot_setup.objectivefunction
#print('Not using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation)
def simulate(self, id_params_tuple):
"""This is a simple wrapper of the model, returning the result together with
the run id and the parameters. This is needed, because some parallel things
can mix up the ordering of runs
"""
id, params = id_params_tuple
self.all_params[self.non_constant_positions] = params #TODO: List parameters are not updated if not accepted for the algorithm, we may have to warn/error if list is given
all_params = self.all_params
# we need a layer to fetch returned data from a threaded process into a queue.
def model_layer(q,all_params):
# Call self.model with a namedtuple instead of another sequence
q.put(self.setup.simulation(self.partype(*all_params)))
        # Start a queue; in Python 2.7 this is a multiprocessing class and can cause errors because of
        # incompatibility with the main thread, so only for older Python versions a workaround follows
que = Queue()
sim_thread = threading.Thread(target=model_layer, args=(que, all_params))
sim_thread.daemon = True
sim_thread.start()
        # If self.sim_timeout is not None the model will break after self.sim_timeout seconds, otherwise it runs
        # as long as it needs to
sim_thread.join(self.sim_timeout)
# If no result from the thread is given, i.e. the thread was killed from the watcher the default result is
# '[nan]' and will not be saved. Otherwise get the result from the thread
model_result = None
if not que.empty():
model_result = que.get()
return id, params, model_result
|
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import os
import inspect
import shutil
import tempfile
import fixtures
from oslotest import base
from oslo.config import cfg
from oslo.messaging import conffixture as messaging_fixture
from oslo.messaging.notify import _impl_test as test_notifier
from testtools import testcase
from designate.openstack.common import log as logging
from designate.openstack.common.fixture import config as cfg_fixture
from designate.openstack.common import importutils
from designate import policy
from designate import utils
from designate.context import DesignateContext
from designate.tests import resources
from designate import exceptions
from designate.network_api import fake as fake_network_api
from designate import network_api
from designate import objects
from designate.manage import database as manage_database
from designate.sqlalchemy import utils as sqlalchemy_utils
LOG = logging.getLogger(__name__)
cfg.CONF.import_opt('storage_driver', 'designate.central',
group='service:central')
cfg.CONF.import_opt('backend_driver', 'designate.agent',
group='service:agent')
cfg.CONF.import_opt('auth_strategy', 'designate.api',
group='service:api')
cfg.CONF.import_opt('connection', 'designate.storage.impl_sqlalchemy',
group='storage:sqlalchemy')
class NotifierFixture(fixtures.Fixture):
def setUp(self):
super(NotifierFixture, self).setUp()
self.addCleanup(test_notifier.reset)
def get(self):
return test_notifier.NOTIFICATIONS
def clear(self):
return test_notifier.reset()
class ServiceFixture(fixtures.Fixture):
def __init__(self, svc_name, *args, **kw):
cls = importutils.import_class(
'designate.%s.service.Service' % svc_name)
self.svc = cls.create(binary='designate-' + svc_name, *args, **kw)
def setUp(self):
super(ServiceFixture, self).setUp()
self.svc.start()
self.addCleanup(self.kill)
def kill(self):
try:
self.svc.kill()
except Exception:
pass
class PolicyFixture(fixtures.Fixture):
def setUp(self):
super(PolicyFixture, self).setUp()
self.addCleanup(policy.reset)
class DatabaseFixture(fixtures.Fixture):
fixtures = {}
@staticmethod
def get_fixture(repo_path, init_version=None):
if repo_path not in DatabaseFixture.fixtures:
DatabaseFixture.fixtures[repo_path] = DatabaseFixture(
repo_path, init_version)
return DatabaseFixture.fixtures[repo_path]
def _mktemp(self):
_, path = tempfile.mkstemp(prefix='designate-', suffix='.sqlite',
dir='/tmp')
return path
def __init__(self, repo_path, init_version=None):
super(DatabaseFixture, self).__init__()
# Create the Golden DB
self.golden_db = self._mktemp()
self.golden_url = 'sqlite:///%s' % self.golden_db
# Migrate the Golden DB
manager = sqlalchemy_utils.get_migration_manager(
repo_path, self.golden_url, init_version)
manager.upgrade(None)
# Prepare the Working Copy DB
self.working_copy = self._mktemp()
self.url = 'sqlite:///%s' % self.working_copy
def setUp(self):
super(DatabaseFixture, self).setUp()
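        # Each test starts from a fresh working copy of the migrated "golden" database,
        # so per-test changes never leak into other tests.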
shutil.copyfile(self.golden_db, self.working_copy)
class NetworkAPIFixture(fixtures.Fixture):
def setUp(self):
super(NetworkAPIFixture, self).setUp()
self.api = network_api.get_network_api(cfg.CONF.network_api)
self.fake = fake_network_api
self.addCleanup(self.fake.reset_floatingips)
class TestCase(base.BaseTestCase):
quota_fixtures = [{
'resource': 'domains',
'hard_limit': 5,
}, {
'resource': 'records',
'hard_limit': 50,
}]
server_fixtures = [{
'name': 'ns1.example.org.',
}, {
'name': 'ns2.example.org.',
}, {
'name': 'ns2.example.org.',
}]
# The last tld is invalid
tld_fixtures = [{
'name': 'com',
}, {
'name': 'co.uk',
}, {
'name': 'com.',
}]
default_tld_fixtures = [{
'name': 'com',
}, {
'name': 'org',
}, {
'name': 'net',
}]
tsigkey_fixtures = [{
'name': 'test-key-one',
'algorithm': 'hmac-md5',
'secret': 'SomeSecretKey',
}, {
'name': 'test-key-two',
'algorithm': 'hmac-sha256',
'secret': 'AnotherSecretKey',
}]
# The last domain is invalid
domain_fixtures = [{
'name': 'example.com.',
'email': '[email protected]',
}, {
'name': 'example.net.',
'email': '[email protected]',
}, {
'name': 'example.org.',
'email': '[email protected]',
}, {
'name': 'invalid.com.....',
'email': '[email protected]',
}]
recordset_fixtures = {
'A': [
{'name': 'mail.%s', 'type': 'A'},
{'name': 'www.%s', 'type': 'A'},
],
'MX': [
{'name': 'mail.%s', 'type': 'MX'},
],
'SRV': [
{'name': '_sip._tcp.%s', 'type': 'SRV'},
{'name': '_sip._udp.%s', 'type': 'SRV'},
],
'CNAME': [
{'name': 'www.%s', 'type': 'CNAME'},
{'name': 'sub1.%s', 'type': 'CNAME'},
]
}
record_fixtures = {
'A': [
{'data': '192.0.2.1'},
{'data': '192.0.2.2'}
],
'MX': [
{'data': 'mail.example.org.', 'priority': 5},
{'data': 'mail.example.com.', 'priority': 10},
],
'SRV': [
{'data': '0 5060 server1.example.org.', 'priority': 5},
{'data': '1 5060 server2.example.org.', 'priority': 10},
],
'CNAME': [
{'data': 'www.somedomain.org.'},
{'data': 'www.someotherdomain.com.'},
]
}
ptr_fixtures = [
{'ptrdname': 'srv1.example.com.'},
{'ptrdname': 'srv1.example.net.'}
]
blacklist_fixtures = [{
'pattern': 'blacklisted.com.',
'description': 'This is a comment',
}, {
'pattern': 'blacklisted.net.'
}, {
'pattern': 'blacklisted.org.'
}]
def setUp(self):
super(TestCase, self).setUp()
self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf
self.messaging_conf = self.useFixture(
messaging_fixture.ConfFixture(cfg.CONF))
self.messaging_conf.transport_driver = 'fake'
self.config(notification_driver='test')
self.notifications = self.useFixture(NotifierFixture())
self.config(
storage_driver='sqlalchemy',
backend_driver='fake',
group='service:central'
)
self.config(
backend_driver='fake',
group='service:agent'
)
self.config(
auth_strategy='noauth',
group='service:api'
)
# The database fixture needs to be set up here (as opposed to isolated
# in a storage test case) because many tests end up using storage.
REPOSITORY = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'storage',
'impl_sqlalchemy',
'migrate_repo'))
self.db_fixture = self.useFixture(
DatabaseFixture.get_fixture(
REPOSITORY, manage_database.INIT_VERSION))
self.config(
connection=self.db_fixture.url,
connection_debug=100,
group='storage:sqlalchemy'
)
self.config(network_api='fake')
self.config(
managed_resource_tenant_id='managing_tenant',
group='service:central')
# "Read" Configuration
self.CONF([], project='designate')
self.useFixture(PolicyFixture())
self.network_api = NetworkAPIFixture()
self.useFixture(self.network_api)
self.central_service = self.start_service('central')
self.admin_context = self.get_admin_context()
# Config Methods
def config(self, **kwargs):
group = kwargs.pop('group', None)
for k, v in kwargs.iteritems():
cfg.CONF.set_override(k, v, group)
def policy(self, rules, default_rule='allow', overwrite=True):
# Inject an allow and deny rule
rules['allow'] = '@'
rules['deny'] = '!'
# Set the rules
policy.set_rules(rules, default_rule, overwrite)
# Other Utility Methods
def get_notifications(self):
return self.notifications.get()
def reset_notifications(self):
self.notifications.clear()
def start_service(self, svc_name, *args, **kw):
"""
Convenience method for starting a service!
"""
fixture = ServiceFixture(svc_name, *args, **kw)
self.useFixture(fixture)
return fixture.svc
# Context Methods
def get_context(self, **kwargs):
return DesignateContext(**kwargs)
def get_admin_context(self):
return DesignateContext.get_admin_context(
tenant=utils.generate_uuid(),
user=utils.generate_uuid())
# Fixture methods
def get_quota_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.quota_fixtures[fixture])
_values.update(values)
return _values
def get_server_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.server_fixtures[fixture])
_values.update(values)
return _values
def get_tld_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.tld_fixtures[fixture])
_values.update(values)
return _values
def get_default_tld_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.default_tld_fixtures[fixture])
_values.update(values)
return _values
def get_tsigkey_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.tsigkey_fixtures[fixture])
_values.update(values)
return _values
def get_domain_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.domain_fixtures[fixture])
_values.update(values)
return _values
def get_recordset_fixture(self, domain_name, type='A', fixture=0,
values=None):
values = values or {}
_values = copy.copy(self.recordset_fixtures[type][fixture])
_values.update(values)
try:
_values['name'] = _values['name'] % domain_name
except TypeError:
pass
return _values
def get_record_fixture(self, recordset_type, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.record_fixtures[recordset_type][fixture])
_values.update(values)
return _values
def get_ptr_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.ptr_fixtures[fixture])
_values.update(values)
return _values
def get_zonefile_fixture(self, variant=None):
if variant is None:
f = 'example.com.zone'
else:
f = '%s_example.com.zone' % variant
path = os.path.join(resources.path, 'zonefiles', f)
with open(path) as zonefile:
return zonefile.read()
def get_blacklist_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.blacklist_fixtures[fixture])
_values.update(values)
return _values
def create_server(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_server_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_server(
context, objects.Server(**values))
def create_tld(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_tld_fixture(fixture=fixture, values=kwargs)
tld = objects.Tld(**values)
return self.central_service.create_tld(context, tld=tld)
def create_default_tld(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_default_tld_fixture(fixture=fixture, values=kwargs)
tld = objects.Tld(**values)
return self.central_service.create_tld(context, tld=tld)
def create_default_tlds(self):
for index in range(len(self.default_tld_fixtures)):
try:
self.create_default_tld(fixture=index)
except exceptions.DuplicateTld:
pass
def create_tsigkey(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_tsigkey_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_tsigkey(
context, objects.TsigKey(**values))
def create_domain(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
try:
# We always need a server to create a domain..
self.create_server()
except exceptions.DuplicateServer:
pass
values = self.get_domain_fixture(fixture=fixture, values=kwargs)
if 'tenant_id' not in values:
values['tenant_id'] = context.tenant
return self.central_service.create_domain(
context, objects.Domain(**values))
def create_recordset(self, domain, type='A', **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_recordset_fixture(domain['name'], type=type,
fixture=fixture,
values=kwargs)
return self.central_service.create_recordset(
context, domain['id'], recordset=objects.RecordSet(**values))
def create_record(self, domain, recordset, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_record_fixture(recordset['type'], fixture=fixture,
values=kwargs)
return self.central_service.create_record(
context,
domain['id'],
recordset['id'],
record=objects.Record(**values))
def create_blacklist(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_blacklist_fixture(fixture=fixture, values=kwargs)
blacklist = objects.Blacklist(**values)
return self.central_service.create_blacklist(
context, blacklist=blacklist)
def _ensure_interface(self, interface, implementation):
for name in interface.__abstractmethods__:
in_arginfo = inspect.getargspec(getattr(interface, name))
im_arginfo = inspect.getargspec(getattr(implementation, name))
self.assertEqual(
in_arginfo, im_arginfo,
"Method Signature for '%s' mismatched" % name)
def _skip_decorator(func):
@functools.wraps(func)
def skip_if_not_implemented(*args, **kwargs):
try:
return func(*args, **kwargs)
except NotImplementedError as e:
raise testcase.TestSkipped(str(e))
except Exception as e:
if 'not implemented' in str(e):
raise testcase.TestSkipped(str(e))
raise
return skip_if_not_implemented
class SkipNotImplementedMeta(type):
def __new__(cls, name, bases, local):
for attr in local:
value = local[attr]
if callable(value) and (
attr.startswith('test_') or attr == 'setUp'):
local[attr] = _skip_decorator(value)
return type.__new__(cls, name, bases, local)
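# Usage sketch (hedged, Python 2 style as used elsewhere in this file): a test class opts
# into the skip behaviour by declaring the metaclass, e.g.
#   class StorageTestCase(TestCase):
#       __metaclass__ = SkipNotImplementedMeta
# so any test_* method (or setUp) raising NotImplementedError is reported as skipped.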
|
|
from django.test import TestCase
from django.utils import timezone
from .. import models
import tmdb3
import datetime
import time
from mock import patch
from django.template import Template, Context
class MockTMDBPoster(object):
def sizes(self):
return ['100', '200']
def geturl(self, size):
return 'http://something.com/{}/img.png'.format(size)
class MockTMDBSeries(object):
def __init__(self, id, name, first_air_date=None):
self.id = id
self.name = name
if first_air_date is None:
first_air_date = datetime.date.today()
self.first_air_date = first_air_date
self.poster = MockTMDBPoster()
class MockTMDBMovie(object):
def __init__(self, id, title, releasedate=None):
self.id = id
self.title = title
if releasedate is None:
releasedate = datetime.date.today()
self.releasedate = releasedate
self.poster = MockTMDBPoster()
class MockGetRated(object):
def __init__(self, media_type, start_id=None):
self.start_id = start_id
if media_type == 'movie':
self.return_class = MockTMDBMovie
elif media_type == 'series':
self.return_class = MockTMDBSeries
def __call__(self):
if self.start_id is None:
return
else:
for new_id in range(self.start_id, self.start_id+3):
yield self.return_class(new_id, 'A Movie or TV show')
self.start_id += 3
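    # Behaviour of the stub: MockGetRated('movie', 101)() yields ids 101-103 on the first
    # call and 104-106 on the next, which is what the database-update tests below assert.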
def set_last_update_time(new_time):
media_db_state, created = models.MediaDB.objects.get_or_create()
if created:
media_db_state.save()
models.MediaDB.objects.filter(pk=media_db_state.pk).update(update_time=new_time)
class TmdbMoviesTests(TestCase):
def test_save_movie(self):
movie = models.Media.movie_from_tmdb(MockTMDBMovie(100, 'A movie'))
movie.save()
self.assertEqual(models.Media.objects.get(), movie)
def test_no_overwrite(self):
original_movie = models.Media.movie_from_tmdb(
MockTMDBMovie(100, 'Original title'))
original_movie.save()
new_movie = models.Media.movie_from_tmdb(
MockTMDBMovie(100, 'New title'))
new_movie.save()
saved_movie = models.Media.objects.get()
self.assertEqual(saved_movie, original_movie)
self.assertEqual(saved_movie.title, 'Original title')
@patch.object(models, 'get_rated_movies', MockGetRated('movie'))
def test_get_latest_added(self):
one_year_ago = datetime.date.today() - datetime.timedelta(days=365)
two_year_ago = datetime.date.today() - datetime.timedelta(days=365*2)
models.Media.movie_from_tmdb(MockTMDBMovie(100, 'A movie')).save()
models.Media.objects.filter(pk=100).update(added=one_year_ago)
models.Media.movie_from_tmdb(MockTMDBMovie(101, 'Another movie', two_year_ago)).save()
models.Media.objects.filter(pk=101).update(added=one_year_ago)
models.Media.movie_from_tmdb(MockTMDBMovie(102, 'A movie 3: movie harder', one_year_ago)).save()
models.Media.objects.filter(pk=102).update(added=one_year_ago)
models.Media.movie_from_tmdb(MockTMDBMovie(200, 'This movie')).save()
models.Media.movie_from_tmdb(MockTMDBMovie(201, 'That movie', two_year_ago)).save()
models.Media.movie_from_tmdb(MockTMDBMovie(202, 'Which movie?', one_year_ago)).save()
added_recent = models.Media.objects.recently_rated('movie', 3)
self.assertEqual([m.tmdb_id for m in added_recent], [200, 202, 201])
added_recent_plus_1 = models.Media.objects.recently_rated('movie', 4)
self.assertEqual([m.tmdb_id for m in added_recent_plus_1], [200, 202, 201, 100])
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 101))
def test_database_update(self):
models.Media.objects.update_movies_from_tmdb()
all_movies = models.Media.objects.all()
self.assertEqual([m.tmdb_id for m in all_movies], [101, 102, 103])
models.Media.objects.update_movies_from_tmdb()
all_movies = models.Media.objects.all()
self.assertEqual([m.tmdb_id for m in all_movies], [101, 102, 103, 104, 105, 106])
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 101))
def test_cached_update(self):
all_movies = models.Media.objects.recently_rated('movie')
self.assertEqual([m.tmdb_id for m in all_movies], [101, 102, 103])
all_movies = models.Media.objects.recently_rated('movie')
self.assertEqual([m.tmdb_id for m in all_movies], [101, 102, 103])
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 101))
def test_update_23h_ago(self):
now_minus_23h = timezone.now() - datetime.timedelta(hours=23)
all_movies = models.Media.objects.recently_rated('movie')
self.assertEqual([m.tmdb_id for m in all_movies], [101, 102, 103])
set_last_update_time(now_minus_23h)
all_movies = models.Media.objects.recently_rated('movie')
self.assertEqual([m.tmdb_id for m in all_movies], [101, 102, 103])
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 101))
def test_update_25h_ago(self):
now_minus_25h = timezone.now() - datetime.timedelta(hours=25)
all_movies = models.Media.objects.recently_rated('movie')
self.assertEqual([m.tmdb_id for m in all_movies], [101, 102, 103])
set_last_update_time(now_minus_25h)
all_movies = models.Media.objects.recently_rated('movie')
self.assertEqual([m.tmdb_id for m in all_movies], [101, 102, 103, 104, 105, 106])
class TmdbTVTests(TestCase):
def test_save_tv(self):
tv = models.Media.tv_from_tmdb(MockTMDBSeries(100, 'A tv show'))
tv.save()
self.assertEqual(models.Media.objects.get(), tv)
def test_no_overwrite(self):
original_tv = models.Media.tv_from_tmdb(
MockTMDBSeries(100, 'Original title'))
original_tv.save()
new_tv = models.Media.tv_from_tmdb(
MockTMDBSeries(100, 'New title'))
new_tv.save()
saved_tv = models.Media.objects.get()
self.assertEqual(saved_tv, original_tv)
self.assertEqual(saved_tv.title, 'Original title')
@patch.object(models, 'get_rated_tv', MockGetRated('series'))
def test_get_latest_added(self):
one_year_ago = datetime.date.today() - datetime.timedelta(days=365)
two_year_ago = datetime.date.today() - datetime.timedelta(days=365*2)
models.Media.tv_from_tmdb(MockTMDBSeries(100, 'A tv show')).save()
models.Media.objects.filter(pk=100).update(added=one_year_ago)
models.Media.tv_from_tmdb(MockTMDBSeries(101, 'Another show', two_year_ago)).save()
models.Media.objects.filter(pk=101).update(added=one_year_ago)
models.Media.tv_from_tmdb(MockTMDBSeries(102, 'Sesame Street', one_year_ago)).save()
models.Media.objects.filter(pk=102).update(added=one_year_ago)
models.Media.tv_from_tmdb(MockTMDBSeries(200, 'This show')).save()
models.Media.tv_from_tmdb(MockTMDBSeries(201, 'That show', two_year_ago)).save()
models.Media.tv_from_tmdb(MockTMDBSeries(202, 'Which show?', one_year_ago)).save()
added_recent = models.Media.objects.recently_rated('series', 3)
self.assertEqual([m.tmdb_id for m in added_recent], [200, 202, 201])
added_recent_plus_1 = models.Media.objects.recently_rated('series', 4)
self.assertEqual([m.tmdb_id for m in added_recent_plus_1], [200, 202, 201, 100])
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
def test_database_update(self):
models.Media.objects.update_tv_from_tmdb()
all_tv = models.Media.objects.all()
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103])
models.Media.objects.update_tv_from_tmdb()
all_tv = models.Media.objects.all()
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103, 104, 105, 106])
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
def test_cached_update(self):
all_tv = models.Media.objects.recently_rated('series')
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103])
all_tv = models.Media.objects.recently_rated('series')
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103])
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
def test_update_23h_ago(self):
now_minus_23h = timezone.now() - datetime.timedelta(hours=23)
all_tv = models.Media.objects.recently_rated('series')
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103])
set_last_update_time(now_minus_23h)
all_tv = models.Media.objects.recently_rated('series')
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103])
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
def test_update_25h_ago(self):
now_minus_25h = timezone.now() - datetime.timedelta(hours=25)
all_tv = models.Media.objects.recently_rated('series')
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103])
set_last_update_time(now_minus_25h)
all_tv = models.Media.objects.recently_rated('series')
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103, 104, 105, 106])
class TmdbMixedMediaTests(TestCase):
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 201))
def test_get_recent_tv(self):
models.Media.objects.update_tv_from_tmdb()
models.Media.objects.update_movies_from_tmdb()
all_tv = models.Media.objects.recently_rated('series')
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103])
self.assertTrue(all([m.media_type == 'series' for m in all_tv]))
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 201))
def test_get_recent_movies(self):
models.Media.objects.update_tv_from_tmdb()
models.Media.objects.update_movies_from_tmdb()
all_movies = models.Media.objects.recently_rated('movie')
self.assertEqual([m.tmdb_id for m in all_movies], [201, 202, 203])
self.assertTrue(all([m.media_type == 'movie' for m in all_movies]))
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 201))
def test_get_recent_media(self):
models.Media.objects.update_tv_from_tmdb()
models.Media.objects.update_movies_from_tmdb()
all_media = models.Media.objects.recently_rated()
self.assertEqual([m.tmdb_id for m in all_media], [101, 102, 103, 201, 202, 203])
self.assertTrue('series' in [m.media_type for m in all_media])
self.assertTrue('movie' in [m.media_type for m in all_media])
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 201))
def test_get_tv_no_update_movies(self):
all_tv = models.Media.objects.recently_rated('series')
self.assertEqual([m.tmdb_id for m in all_tv], [101, 102, 103])
self.assertFalse(models.Media.objects.filter(media_type='movie').exists())
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 201))
def test_get_movies_no_update_tv(self):
all_movies = models.Media.objects.recently_rated('movie')
self.assertEqual([m.tmdb_id for m in all_movies], [201, 202, 203])
self.assertFalse(models.Media.objects.filter(media_type='series').exists())
@patch.object(models, 'get_rated_tv', MockGetRated('series', 101))
@patch.object(models, 'get_rated_movies', MockGetRated('movie', 201))
def test_get_all_update_all(self):
all_media = models.Media.objects.recently_rated()
self.assertEqual([m.tmdb_id for m in all_media], [201, 202, 203, 101, 102, 103])
self.assertTrue('series' in [m.media_type for m in all_media])
self.assertTrue('movie' in [m.media_type for m in all_media])
class TMDBTemplateTagTest(TestCase):
TEMPLATE = Template(
"{% load tmdb_tags %}{% tmdb_recent_media %}")
@patch.object(models, 'get_rated_tv', MockGetRated('series'))
@patch.object(models, 'get_rated_movies', MockGetRated('movie'))
def test_movie_shows_up(self):
movie = models.Media.objects.create(
            tmdb_id=101, title='101 Dalmatians',
media_type='movie', release=datetime.date.today())
movie.save()
rendered = self.TEMPLATE.render(Context({}))
self.assertIn(movie.title, rendered)
@patch.object(models, 'get_rated_tv', MockGetRated('series'))
@patch.object(models, 'get_rated_movies', MockGetRated('movie'))
def test_movie_has_link(self):
movie = models.Media.objects.create(
            tmdb_id=101, title='101 Dalmatians',
media_type='movie', release=datetime.date.today())
movie.save()
rendered = self.TEMPLATE.render(Context({}))
self.assertIn(movie.get_tmdb_url(), rendered)
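# A hedged sketch (an assumption, not this project's actual implementation) of
# how an inclusion tag such as tmdb_recent_media, exercised by the template
# tests above, might be registered. The template path and import layout are
# illustrative only, so the sketch is left commented out:
#
#     from django import template
#     from tmdb import models  # assumed app layout
#
#     register = template.Library()
#
#     @register.inclusion_tag('tmdb/recent_media.html')
#     def tmdb_recent_media():
#         return {'media': models.Media.objects.recently_rated()}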
|
|
#Author: Andy O'Connell <[email protected]> <[email protected]>
#
# -------------------------- R-Format --------------------------
# o----------------------------------------------------------------o
# | opcode | rs | rt | rd | shift (shamt) | funct |
# |---------|---------|----------|--------|---------------|--------|
# | 6 bits | 5 bits | 5 bits | 5 bits | 5 bits | 6 bits |
# o----------------------------------------------------------------o
# op op op op op op rs rs rs rs rs rt rt rt rt rt rd rd rd rd rd -- -- -- -- -- fc fc fc fc fc fc
# 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00
#
# -------------- I-Format ----------------
# o----------------------------------------o
# | opcode | rs | rt | OFFSET |
# |--------|---------|----------|----------|
# | 6 bits | 5 bits | 5 bits | 16 bits |
# o----------------------------------------o
# op op op op op op rs rs rs rs rs rt rt rt rt rt of of of of of of of of of of of of of of of of
# 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00
#
# ================================== OPCODES =================================
# o------------------------------------------------------o
# | MNEMONIC | MEANING        | TYPE | OPCODE | FUNCTION |
# |======================================================|
# | add      | Add            | R    | 0x00   | 0x20     |
# | sub      | Subtract       | R    | 0x00   | 0x22     |
# | nop      | Null operation | R    | 0x00   | 0x00     |
# | lb       | Load Byte      | I    | 0x20   | N/A      |
# | sb       | Store Byte     | I    | 0x28   | N/A      |
# o------------------------------------------------------o
#
# HEX Reference
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# 0 1 2 3 4 5 6 7 8 9 A B C D E F
from Pipeline.MIPSDisassembler import Disassembler # This is from Project 1
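# A hedged helper sketch (illustrative, not part of the original project): it
# extracts the R-format and I-format fields documented in the header above
# from a raw 32-bit instruction word, independently of the Disassembler class.
def _decode_fields(word):
    """Return the MIPS instruction fields of a 32-bit word as a dict."""
    return {
        'opcode': (word >> 26) & 0x3F,   # bits 31-26
        'rs':     (word >> 21) & 0x1F,   # bits 25-21
        'rt':     (word >> 16) & 0x1F,   # bits 20-16
        'rd':     (word >> 11) & 0x1F,   # bits 15-11 (R-format only)
        'shamt':  (word >> 6) & 0x1F,    # bits 10-6  (R-format only)
        'funct':  word & 0x3F,           # bits 5-0   (R-format only)
        'offset': word & 0xFFFF,         # bits 15-0  (I-format only)
    }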
class Pipeline(object):
def __init__(self, startProgramCount, InstructionCache):
"""Initalizes the 5 Stages of the Pipeline. (IF, ID, EX, MEM, WB)
"""
#Main Memory, Registers. Must be initialized
self.Main_Mem = None
self.initializeMainMem()
self.Regs = None
self.initializeRegs()
# Instructions to be used. = {0x7A000 : 0xa1020000,}
self.InstructionCache = InstructionCache
self.PC = startProgramCount
#----------------------------------------------------------------------
#IF/ID Write
self.IFID_W_Inst = 0
self.IFID_W_IncrPC = 0
#IF/ID Read
self.IFID_R_Inst = 0
self.IFID_R_IncrPC = 0
#----------------------------------------------------------------------
#ID/EX Write
self.IDEX_W_RegDst = 0
self.IDEX_W_ALUSrc = 0
self.IDEX_W_ALUOp = 0
self.IDEX_W_MemRead = 0
self.IDEX_W_MemWrite = 0
self.IDEX_W_Branch = 0
self.IDEX_W_MemToReg = 0
self.IDEX_W_RegWrite = 0
self.IDEX_W_IncrPC = 0
self.IDEX_W_ReadReg1Value = 0
self.IDEX_W_ReadReg2Value = 0
self.IDEX_W_SEOffset = 0
self.IDEX_W_WriteReg_20_16 = 0
self.IDEX_W_WriteReg_15_11 = 0
self.IDEX_W_Function = 0
#ID/EX Read
self.IDEX_R_RegDst = 0
self.IDEX_R_ALUSrc = 0
self.IDEX_R_ALUOp = 0
self.IDEX_R_MemRead = 0
self.IDEX_R_MemWrite = 0
self.IDEX_R_Branch = 0
self.IDEX_R_MemToReg = 0
self.IDEX_R_RegWrite = 0
self.IDEX_R_IncrPC = 0
self.IDEX_R_ReadReg1Value = 0
self.IDEX_R_ReadReg2Value = 0
self.IDEX_R_SEOffset = 0
self.IDEX_R_WriteReg_20_16 = 0
self.IDEX_R_WriteReg_15_11 = 0
self.IDEX_R_Function = 0
#----------------------------------------------------------------------
#EX/MEM Write
self.EXMEM_W_MemRead = 0
self.EXMEM_W_MemWrite = 0
self.EXMEM_W_Branch = 0
self.EXMEM_W_MemToReg = 0
self.EXMEM_W_RegWrite = 0
self.EXMEM_W_Zero = 0
self.EXMEM_W_ALUResult = 0
self.EXMEM_W_SWValue = 0
self.EXMEM_W_WriteRegNum = 0
#EX/MEM Read
self.EXMEM_R_MemRead = 0
self.EXMEM_R_MemWrite = 0
self.EXMEM_R_Branch = 0
self.EXMEM_R_MemToReg = 0
self.EXMEM_R_RegWrite = 0
self.EXMEM_R_Zero = 0
self.EXMEM_R_ALUResult = 0
self.EXMEM_R_SWValue = 0
self.EXMEM_R_WriteRegNum = 0
#----------------------------------------------------------------------
#MEM/WB Write
self.MEMWB_W_MemToReg = 0
self.MEMWB_W_RegWrite = 0
self.MEMWB_W_LWDataValue = 0
self.MEMWB_W_ALUResult = 0
self.MEMWB_W_WriteRegNum = 0
#MEM/WB Read
self.MEMWB_R_MemToReg = 0
self.MEMWB_R_RegWrite = 0
self.MEMWB_R_LWDataValue = 0
self.MEMWB_R_ALUResult = 0
self.MEMWB_R_WriteRegNum = 0
def IF_stage(self):
"""You will fetch the next instruction out of the Instruction Cache.
Put it in the WRITE version of the IF/ID pipeline Register """
#IF/ID Write
self.IFID_W_Inst = self.InstructionCache[self.PC]
self.IFID_W_IncrPC = self.PC + 0x4
self.PC = self.PC + 0x4
def ID_stage(self):
"""Here you'll read an instruction from the READ version of IF/ID
pipeline register, do the decoding and register fetching and write the
values to the WRITE version of the ID/EX pipeline register."""
# Null Operation Condition
if self.IFID_R_Inst == 0:
self.IDEX_W_RegDst = 0
self.IDEX_W_ALUSrc = 0
self.IDEX_W_ALUOp = 0
self.IDEX_W_MemRead = 0
self.IDEX_W_MemWrite = 0
self.IDEX_W_Branch = 0
self.IDEX_W_MemToReg = 0
self.IDEX_W_RegWrite = 0
self.IDEX_W_IncrPC = 0
self.IDEX_W_ReadReg1Value = 0
self.IDEX_W_ReadReg2Value = 0
self.IDEX_W_SEOffset = 0
self.IDEX_W_WriteReg_20_16 = 0
self.IDEX_W_WriteReg_15_11 = 0
self.IDEX_W_Function = 0
#print("ID_stage: nop")
return True
d = Disassembler()
d.load(self.IFID_R_Inst)
# Set Control Variables
if d.formatType == 'R':
self.IDEX_W_RegDst = 1
self.IDEX_W_ALUSrc = 0
self.IDEX_W_ALUOp = 2
self.IDEX_W_MemRead = 0
self.IDEX_W_MemWrite = 0
self.IDEX_W_Branch = 0
self.IDEX_W_MemToReg = 0
self.IDEX_W_RegWrite = 1 # End of Control
self.IDEX_W_ReadReg1Value = self.Regs[d.rs]
self.IDEX_W_ReadReg2Value = self.Regs[d.rt]
self.IDEX_W_SEOffset = 'x'
            self.IDEX_W_WriteReg_20_16 = d.rt  # instruction bits 20-16 (rt)
self.IDEX_W_WriteReg_15_11 = d.rd
self.IDEX_W_Function = d.funct
self.IDEX_W_IncrPC = self.IFID_R_IncrPC
elif d.opcode == 0x20: #lb
self.IDEX_W_RegDst = 0
self.IDEX_W_ALUSrc = 1
self.IDEX_W_ALUOp = 0
self.IDEX_W_MemRead = 1
self.IDEX_W_MemWrite = 0
self.IDEX_W_Branch = 0
self.IDEX_W_MemToReg = 1
self.IDEX_W_RegWrite = 1 # End of Control
self.IDEX_W_ReadReg1Value = self.Regs[d.rs]
self.IDEX_W_ReadReg2Value = self.Regs[d.rt]
self.IDEX_W_SEOffset = d.offset
            self.IDEX_W_WriteReg_20_16 = d.rt  # instruction bits 20-16 (rt)
self.IDEX_W_WriteReg_15_11 = d.rt
self.IDEX_W_Function = 'x'
self.IDEX_W_IncrPC = self.IFID_R_IncrPC
elif d.opcode == 0x28: #sb
self.IDEX_W_RegDst = 'x'
self.IDEX_W_ALUSrc = 1
self.IDEX_W_ALUOp = 0
self.IDEX_W_MemRead = 0
self.IDEX_W_MemWrite = 1
self.IDEX_W_Branch = 0
self.IDEX_W_MemToReg = 'x'
self.IDEX_W_RegWrite = 0 # End of Control
self.IDEX_W_ReadReg1Value = self.Regs[d.rs]
self.IDEX_W_ReadReg2Value = self.Regs[d.rt]
self.IDEX_W_SEOffset = d.offset
            self.IDEX_W_WriteReg_20_16 = d.rt  # instruction bits 20-16 (rt)
self.IDEX_W_WriteReg_15_11 = d.rt
self.IDEX_W_Function = 'x'
self.IDEX_W_IncrPC = self.IFID_R_IncrPC
def EX_stage(self):
""" Here you'll perform the requested instruction on the spicific
operands you read out of the READ version of the ID/EX pipeline register
and then write the appropriate values to the WRITE version of the EX/MEM
pipeline register. For example, an "add" operation will take the two
operands out of the ID/EX pipeline register and add them
together like this:
EX_MEM_WRITE.ALU_Result = ID_EX_READ.Reg_Val1 + ID_EX_READ.Reg_Val2;
"""
self.EXMEM_W_MemRead = self.IDEX_R_MemRead
self.EXMEM_W_MemWrite = self.IDEX_R_MemWrite
self.EXMEM_W_Branch = self.IDEX_R_Branch
self.EXMEM_W_MemToReg = self.IDEX_R_MemToReg
self.EXMEM_W_RegWrite = self.IDEX_R_RegWrite
# Calculate ALUResult based on ALUOp
if self.IDEX_R_ALUOp == 2 and self.IDEX_R_Function == 0x20: #R-Type Add
self.EXMEM_W_ALUResult = self.IDEX_R_ReadReg1Value + self.IDEX_R_ReadReg2Value
elif self.IDEX_R_ALUOp == 2 and self.IDEX_R_Function == 0x22: #R-Type sub
            self.EXMEM_W_ALUResult = self.IDEX_R_ReadReg1Value - self.IDEX_R_ReadReg2Value
elif self.IDEX_R_ALUOp == 0: #lb and sb
self.EXMEM_W_ALUResult = self.IDEX_R_ReadReg1Value + self.IDEX_R_SEOffset
# Zero
if self.EXMEM_W_ALUResult == 0:
self.EXMEM_W_Zero = 1
else:
self.EXMEM_W_Zero = 0
self.EXMEM_W_SWValue = self.IDEX_R_ReadReg2Value
self.EXMEM_W_WriteRegNum = self.IDEX_R_WriteReg_15_11
def MEM_stage(self):
"""If the instruction is a lb, then use the address you calculated in
the EX stage as an index into your Main Memory array and get the value
that is there. Otherwise, just pass information from the READ version
of the EX_MEM pipeline register to the WRITE version of MEM_WB.
"""
self.MEMWB_W_MemToReg = self.EXMEM_R_MemToReg
self.MEMWB_W_RegWrite = self.EXMEM_R_RegWrite
self.MEMWB_W_ALUResult = self.EXMEM_R_ALUResult
self.MEMWB_W_WriteRegNum = self.EXMEM_R_WriteRegNum
if self.EXMEM_R_MemToReg == 1:
#print("Loading x{0:x} from Main Mem[x{1:x}]".format(self.Main_Mem[self.EXMEM_R_ALUResult], self.EXMEM_R_ALUResult))
self.MEMWB_W_LWDataValue = self.Main_Mem[self.EXMEM_R_ALUResult]
else:
self.MEMWB_W_LWDataValue = 'x'
if self.EXMEM_R_MemWrite == 1:
#print("Storing x{0:x} to Main Mem[x{1:x}]".format(self.EXMEM_R_SWValue, self.EXMEM_R_ALUResult))
self.Main_Mem[self.EXMEM_R_ALUResult] = self.EXMEM_R_SWValue
def WB_stage(self):
"""Write to the registers based on information you read out of the READ
version of MEM_WB.
"""
# R-Format
if (self.MEMWB_R_MemToReg == 0) and (self.MEMWB_R_RegWrite == 1):
self.Regs[self.MEMWB_R_WriteRegNum] = self.MEMWB_R_ALUResult
# lb
elif (self.MEMWB_R_MemToReg == 1) and (self.MEMWB_R_RegWrite == 1):
self.Regs[self.MEMWB_R_WriteRegNum] = self.MEMWB_R_LWDataValue
def Print_out_everything(self):
#IF
print("GlobalPC: x{0:x}".format(self.PC))
print("Inst: x{0:x}".format(self.IFID_W_Inst))
print("IncrPC: x{0:x}".format(self.IFID_W_IncrPC))
#IF/ID
print('\nIF/ID Write ----------------')
print('Inst = x{0:x}'.format(self.IFID_W_Inst))
print('IF/ID Read ------------------')
print('Inst = x{0:x}'.format(self.IFID_R_Inst))
#ID/EX
print('\nID/EX Write ----------------')
print('RegDst: ',self.IDEX_W_RegDst)
print('ALUSrc: ',self.IDEX_W_ALUSrc)
print('ALUOp: ',self.IDEX_W_ALUOp)
print('MemRead: ',self.IDEX_W_MemRead)
        print('MemWrite: ',self.IDEX_W_MemWrite)
print('MemToReg: ',self.IDEX_W_MemToReg)
print('RegWrite: ',self.IDEX_W_RegWrite)
print('IncrPC: {0:x}'.format(self.IDEX_W_IncrPC))
print('ReadReg1Value: {0:x}'.format(self.IDEX_W_ReadReg1Value))
print('ReadReg2Value: {0:x}'.format(self.IDEX_W_ReadReg2Value))
print('SEOffset: ',self.IDEX_W_SEOffset)
print('WriteReg_20_16: ',self.IDEX_W_WriteReg_20_16)
print('WriteReg_15_11: ',self.IDEX_W_WriteReg_15_11)
print('Function: ',self.IDEX_W_Function)
print('\nID/EX Read ------------------')
print('RegDst: ',self.IDEX_R_RegDst)
print('ALUSrc: ',self.IDEX_R_ALUSrc)
print('ALUOp: ',self.IDEX_R_ALUOp)
print('MemRead: ',self.IDEX_R_MemRead)
        print('MemWrite: ',self.IDEX_R_MemWrite)
print('MemToReg: ',self.IDEX_R_MemToReg)
print('RegWrite: ',self.IDEX_R_RegWrite)
print('IncrPC: {0:x}'.format(self.IDEX_R_IncrPC))
print('ReadReg1Value: {0:x}'.format(self.IDEX_R_ReadReg1Value))
print('ReadReg2Value: {0:x}'.format(self.IDEX_R_ReadReg2Value))
print('SEOffset: ',self.IDEX_R_SEOffset)
print('WriteReg_20_16: ',self.IDEX_R_WriteReg_20_16)
print('WriteReg_15_11: ',self.IDEX_R_WriteReg_15_11)
print('Function: ',self.IDEX_R_Function)
#EX
print('\nEX/MEM Write-------------------')
print("MemRead: ", self.EXMEM_W_MemRead)
print("MemWrite: ", self.EXMEM_W_MemWrite)
print("Branch: ", self.EXMEM_W_Branch)
print("MemToReg: ", self.EXMEM_W_MemToReg)
print("RegWrite: ", self.EXMEM_W_RegWrite)
print("Zero: ", self.EXMEM_W_Zero)
print("ALUResult: {0:x}".format(self.EXMEM_W_ALUResult))
print("SWValue: {0:x}".format(self.EXMEM_W_SWValue))
print("WriteRegNum: ", self.EXMEM_W_WriteRegNum)
print('\nEX/MEM Read-------------------')
print("MemRead: ", self.EXMEM_R_MemRead)
print("MemWrite: ", self.EXMEM_R_MemWrite)
print("Branch: ", self.EXMEM_R_Branch)
print("MemToReg: ", self.EXMEM_R_MemToReg)
print("RegWrite: ", self.EXMEM_R_RegWrite)
print("Zero: ", self.EXMEM_R_Zero)
print("ALUResult: {0:x}".format(self.EXMEM_R_ALUResult))
print("SWValue: {0:x}".format(self.EXMEM_R_SWValue))
print("WriteRegNum: ", self.EXMEM_R_WriteRegNum)
#MEM
print('\nMEM/WB Write-----------------------')
print("MemToReg: ", self.MEMWB_W_MemToReg)
print("RegWrite: ", self.MEMWB_W_RegWrite)
print("ALUResult: {0:x}".format(self.MEMWB_W_ALUResult))
print("WriteRegNum: ", self.MEMWB_W_WriteRegNum)
print("LWDataValue: ", self.MEMWB_W_LWDataValue)
print('\nMEM/WB Read-----------------------')
print("MemToReg: ", self.MEMWB_R_MemToReg)
print("RegWrite: ", self.MEMWB_R_RegWrite)
print("ALUResult: {0:x}".format(self.MEMWB_R_ALUResult))
print("WriteRegNum: ", self.MEMWB_R_WriteRegNum)
print("LWDataValue: ", self.MEMWB_R_LWDataValue)
# #WB
# print('\n-------------WB-------------')
# print("MemToReg: ",self.MEMWB_R_MemToReg)
# print("RegWrite: ",self.MEMWB_R_RegWrite)
# print("LWDataValue: ",self.MEMWB_R_LWDataValue)
# print("ALUResult: {0:x}".format(self.MEMWB_R_ALUResult))
# print("WriteRegNum: ",self.MEMWB_R_WriteRegNum)
print("\n------------Registers-------------")
for i in range(len(self.Regs)):
print("Regs[{0:d}] = {1:x}".format(i, self.Regs[i]))
def Copy_write_to_read(self):
#IF/ID Read
self.IFID_R_Inst = self.IFID_W_Inst
self.IFID_R_IncrPC = self.IFID_W_IncrPC
#ID/EX Read
self.IDEX_R_RegDst = self.IDEX_W_RegDst
self.IDEX_R_ALUSrc = self.IDEX_W_ALUSrc
self.IDEX_R_ALUOp = self.IDEX_W_ALUOp
self.IDEX_R_MemRead = self.IDEX_W_MemRead
self.IDEX_R_MemWrite = self.IDEX_W_MemWrite
self.IDEX_R_Branch = self.IDEX_W_Branch
self.IDEX_R_MemToReg = self.IDEX_W_MemToReg
self.IDEX_R_RegWrite = self.IDEX_W_RegWrite
self.IDEX_R_IncrPC = self.IDEX_W_IncrPC
self.IDEX_R_ReadReg1Value = self.IDEX_W_ReadReg1Value
self.IDEX_R_ReadReg2Value = self.IDEX_W_ReadReg2Value
self.IDEX_R_SEOffset = self.IDEX_W_SEOffset
self.IDEX_R_WriteReg_20_16 = self.IDEX_W_WriteReg_20_16
self.IDEX_R_WriteReg_15_11 = self.IDEX_W_WriteReg_15_11
self.IDEX_R_Function = self.IDEX_W_Function
#EX/MEM Read
self.EXMEM_R_MemRead = self.EXMEM_W_MemRead
self.EXMEM_R_MemWrite = self.EXMEM_W_MemWrite
self.EXMEM_R_Branch = self.EXMEM_W_Branch
self.EXMEM_R_MemToReg = self.EXMEM_W_MemToReg
self.EXMEM_R_RegWrite = self.EXMEM_W_RegWrite
self.EXMEM_R_Zero = self.EXMEM_W_Zero
self.EXMEM_R_ALUResult = self.EXMEM_W_ALUResult
self.EXMEM_R_SWValue = self.EXMEM_W_SWValue
self.EXMEM_R_WriteRegNum = self.EXMEM_W_WriteRegNum
#MEM/WB Read
self.MEMWB_R_MemToReg = self.MEMWB_W_MemToReg
self.MEMWB_R_RegWrite = self.MEMWB_W_RegWrite
self.MEMWB_R_LWDataValue = self.MEMWB_W_LWDataValue
self.MEMWB_R_ALUResult = self.MEMWB_W_ALUResult
self.MEMWB_R_WriteRegNum = self.MEMWB_W_WriteRegNum
    def initializeMainMem(self):
        # 1 KB of byte-addressable memory; each byte holds the low 8 bits
        # of its own address.
        self.Main_Mem = []
        for i in range(1024):
            self.Main_Mem.append(i & 0b000011111111)
def initializeRegs(self):
self.Regs = []
for i in range(32):
self.Regs.append( i + 0x100)
self.Regs[0] = 0 #Special Case for Reg 0
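# A hedged usage sketch (not part of the original assignment code): drives the
# five stages for a handful of cycles. The instruction cache maps word-aligned
# addresses to 32-bit instruction words, as hinted in __init__; the program,
# start address, and register numbers below are illustrative.
if __name__ == '__main__':
    program = {
        0x7A000: 0x00A63820,  # add $7, $5, $6
        0x7A004: 0x00000000,  # nop
        0x7A008: 0x00000000,  # nop
        0x7A00C: 0x00000000,  # nop
        0x7A010: 0x00000000,  # nop
    }
    pipe = Pipeline(0x7A000, program)
    for cycle in range(len(program)):
        pipe.IF_stage()
        pipe.ID_stage()
        pipe.EX_stage()
        pipe.MEM_stage()
        pipe.WB_stage()
        pipe.Print_out_everything()
        pipe.Copy_write_to_read()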
|
|
"""
Handle files and directories over SSH.
The `scp` command line tool is preferred to paramiko for performance reasons.
Then to avoid mixing command line tools and paramiko, `ssh` is also used.
Please refer to `ssh` and `ssh_config` documentation to configure session
multiplexing.
Examples
========
>>> import os
>>> import tempfile
>>> host = os.getenv('EXAMPLE_HOST', 'localhost')
>>> dir = Resource('/tmp', host)
>>> isinstance(dir, SSHDirectoryResource)
True
>>> tmp = tempfile.NamedTemporaryFile()
>>> with open(tmp.name, 'wb') as fp:
... fp.write('a\\nb\\nc\\n')
>>> dir.add(tmp.name)
>>> filename = os.path.basename(tmp.name)
>>> filename in dir
True
>>> file = dir[filename]
>>> file.read()
'a\\nb\\nc\\n'
>>> list(file)
['a', 'b', 'c']
>>> file.get(tmp.name) == tmp.name
True
>>> dir.remove(tmp.name)
>>> tmp.name in dir
False
"""
from __future__ import absolute_import
import os
import subprocess
import collections
from .base import AbstractResource, scheme_to_resource
DEFAULT_LOCATION = 'localhost'
StatResult = collections.namedtuple('StatResult',
['st_atime', 'st_mtime', 'st_ctime'])
def _stat(path, format, location=DEFAULT_LOCATION):
return ssh(location,
"stat \"--format='{}'\" {}".format(format, path))[0]
def is_dir(path, location=DEFAULT_LOCATION):
return (_stat(path, ' %F', location)
.strip()
.rsplit(' ', 1)[-1]) == 'directory'
OPTIONS = ' '.join(['-o "PasswordAuthentication no"',  # fail with an error
                    # instead of prompting for a password if the public key
                    # is not accepted
                    '-o "StrictHostKeyChecking no"',  # do not ask to
                    # confirm the host fingerprint
                    ])
SCP = 'scp -B {options}'.format(options=OPTIONS)
SSH = 'ssh {options}'.format(options=OPTIONS)
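# A hedged sketch (an assumption, not part of the original module): session
# multiplexing can also be enabled per command by extending OPTIONS with
# OpenSSH ControlMaster settings instead of relying only on ssh_config.
# The socket path below is illustrative.
MUX_OPTIONS = ' '.join(['-o "ControlMaster auto"',
                        '-o "ControlPath /tmp/ssh-mux-%r@%h:%p"',
                        '-o "ControlPersist 60"'])
SSH_MUX = '{ssh} {mux}'.format(ssh=SSH, mux=MUX_OPTIONS)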
def path(host, path, user=''):
return '{user}@{host}:{path}'.format(user=user, host=host, path=path)
def scp(src, dst, options=None):
if options is None:
options = []
command = '{scp} {options} {src} {dst}'.format(
scp=SCP, src=src, dst=dst,
options=' '.join(options))
return subprocess.call(command, shell=True)
def ssh(host, command):
command = '{ssh} {host} {cmd}'.format(ssh=SSH,
host=host,
cmd=command)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
return process.communicate()
def Resource(path, location=DEFAULT_LOCATION):
if is_dir(path, location):
return SSHDirectoryResource(path, location)
return SSHFileResource(path, location)
class SSHResource(AbstractResource):
type = 'ssh'
def __init__(self, path, location):
"""
Parameters
        ----------
path: str
path to a file on the remote host
location: str
location of the remote host:
- host
- user@host
"""
AbstractResource.__init__(self, 'ssh://' + location + path)
self._path = path
self._location = location
location_ = location.split('@', 1)
if len(location_) > 1:
self._user = location_[0]
self._host = location_[1]
        else:
            self._user = ''  # no explicit user; ssh falls back to the default
            self._host = location
self._remote_path = '{host}:{path}'.format(host=location, path=path)
@property
def path(self):
return self._path
@property
def location(self):
return self._location
@property
def name(self):
return os.path.basename(self._path)
@property
def user(self):
return self._user
@property
def host(self):
return self._host
@property
def size(self):
return int(ssh(self._location,
'du -bs {}'.format(self.path))[0].split()[0])
@property
def atime(self):
return self.stat().st_atime
@property
def ctime(self):
return self.stat().st_ctime
@property
def mtime(self):
return self.stat().st_mtime
def stat(self):
stdout = ssh(self._location,
"stat \"--format='%X %Y %Z'\" {}".format(self._path))[0]
return StatResult(*map(int, stdout.split()))
def get(self, path):
scp(self._remote_path, path)
return path
def put(self, path):
scp(path, self._remote_path)
class SSHFileResource(SSHResource):
def read(self):
return ssh(self._location, 'cat {}'.format(self._path))[0]
def __iter__(self):
start = 0
end = 0
content = self.read()
for c in content:
end += 1
if c == '\n':
yield content[start:end - 1]
start = end
class SSHDirectoryResource(SSHResource):
def join(self, name):
return os.path.join(self.path, name)
    __div__ = join      # '/' operator on Python 2
    __truediv__ = join  # '/' operator on Python 3
def __getitem__(self, name):
return Resource(self.join(name), self._location)
def add(self, path, overwrite=True):
filename = os.path.basename(path)
if not overwrite and filename in self:
raise IOError("'{}' already exists in '{}' on {}".format(
filename, self.path, self.location))
SSHFileResource(self / filename, self._location).put(path)
def update(self, files):
for file in files:
file.put(self.join(file.name))
def remove(self, filename):
ssh(self._location, 'rm {}'.format(self.join(filename)))
def __iter__(self):
stdout = ssh(self._location, 'ls -1 {}'.format(self._path))[0]
return iter(stdout.strip().split())
def __contains__(self, name):
return name in list(self)
def get(self, path):
scp(self._remote_path, path, options=['-r'])
return path
def put(self, path):
scp(path, self._remote_path, options=['-r'])
scheme_to_resource.register('ssh', Resource)
|
|
import tox
import py
import pytest
import sys
from tox._pytestplugin import ReportExpectMock
try:
import json
except ImportError:
import simplejson as json
pytest_plugins = "pytester"
from tox._cmdline import Session
from tox._config import parseconfig
def test_report_protocol(newconfig):
config = newconfig([], """
[testenv:mypython]
deps=xy
""")
class Popen:
def __init__(self, *args, **kwargs):
pass
def communicate(self):
return "", ""
def wait(self):
pass
session = Session(config, popen=Popen,
Report=ReportExpectMock)
report = session.report
report.expect("using")
venv = session.getvenv("mypython")
venv.update()
report.expect("logpopen")
def test__resolve_pkg(tmpdir, mocksession):
distshare = tmpdir.join("distshare")
spec = distshare.join("pkg123-*")
py.test.raises(tox.exception.MissingDirectory,
'mocksession._resolve_pkg(spec)')
distshare.ensure(dir=1)
py.test.raises(tox.exception.MissingDependency,
'mocksession._resolve_pkg(spec)')
distshare.ensure("pkg123-1.3.5.zip")
p = distshare.ensure("pkg123-1.4.5.zip")
mocksession.report.clear()
result = mocksession._resolve_pkg(spec)
assert result == p
mocksession.report.expect("info", "determin*pkg123*")
distshare.ensure("pkg123-1.4.7dev.zip")
mocksession._clearmocks()
result = mocksession._resolve_pkg(spec)
mocksession.report.expect("warning", "*1.4.7*")
assert result == p
mocksession._clearmocks()
distshare.ensure("pkg123-1.4.5a1.tar.gz")
result = mocksession._resolve_pkg(spec)
assert result == p
def test__resolve_pkg_doubledash(tmpdir, mocksession):
distshare = tmpdir.join("distshare")
p = distshare.ensure("pkg-mine-1.3.0.zip")
res = mocksession._resolve_pkg(distshare.join("pkg-mine*"))
assert res == p
distshare.ensure("pkg-mine-1.3.0a1.zip")
res = mocksession._resolve_pkg(distshare.join("pkg-mine*"))
assert res == p
class TestSession:
def test_make_sdist(self, initproj):
initproj("example123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
'''
})
config = parseconfig([])
session = Session(config)
sdist = session.sdist()
assert sdist.check()
assert sdist.ext == ".zip"
assert sdist == config.distdir.join(sdist.basename)
sdist2 = session.sdist()
assert sdist2 == sdist
sdist.write("hello")
assert sdist.stat().size < 10
sdist_new = Session(config).sdist()
assert sdist_new == sdist
assert sdist_new.stat().size > 10
def test_make_sdist_distshare(self, tmpdir, initproj):
distshare = tmpdir.join("distshare")
initproj("example123-0.6", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[tox]
distshare=%s
''' % distshare
})
config = parseconfig([])
session = Session(config)
sdist = session.sdist()
assert sdist.check()
assert sdist.ext == ".zip"
assert sdist == config.distdir.join(sdist.basename)
sdist_share = config.distshare.join(sdist.basename)
assert sdist_share.check()
assert sdist_share.read("rb") == sdist.read("rb"), (sdist_share, sdist)
def test_log_pcall(self, mocksession):
mocksession.config.logdir.ensure(dir=1)
assert not mocksession.config.logdir.listdir()
action = mocksession.newaction(None, "something")
action.popen(["echo", ])
match = mocksession.report.getnext("logpopen")
assert match[1].outpath.relto(mocksession.config.logdir)
assert match[1].shell == False
def test_summary_status(self, initproj, capfd):
initproj("logexample123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv:hello]
[testenv:world]
'''
})
config = parseconfig([])
session = Session(config)
envlist = ['hello', 'world']
envs = session.venvlist
assert len(envs) == 2
env1, env2 = envs
env1.status = "FAIL XYZ"
assert env1.status
env2.status = 0
assert not env2.status
session._summary()
out, err = capfd.readouterr()
exp = "%s: FAIL XYZ" % env1.envconfig.envname
assert exp in out
exp = "%s: commands succeeded" % env2.envconfig.envname
assert exp in out
def test_getvenv(self, initproj, capfd):
initproj("logexample123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv:hello]
[testenv:world]
'''
})
config = parseconfig([])
session = Session(config)
venv1 = session.getvenv("hello")
venv2 = session.getvenv("hello")
assert venv1 is venv2
venv1 = session.getvenv("world")
venv2 = session.getvenv("world")
assert venv1 is venv2
pytest.raises(LookupError, lambda: session.getvenv("qwe"))
# not sure we want this option ATM
def XXX_test_package(cmd, initproj):
initproj("myproj-0.6", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'MANIFEST.in': """
include doc
include myproj
""",
'tox.ini': ''
})
result = cmd.run("tox", "package")
assert not result.ret
result.stdout.fnmatch_lines([
"*created sdist package at*",
])
def test_minversion(cmd, initproj):
initproj("interp123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[tox]
minversion = 6.0
'''
})
result = cmd.run("tox", "-v")
result.stdout.fnmatch_lines([
"*ERROR*tox version is * required is at least 6.0*"
])
assert result.ret
def test_unknown_interpreter_and_env(cmd, initproj):
initproj("interp123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv:python]
basepython=xyz_unknown_interpreter
[testenv]
changedir=tests
'''
})
result = cmd.run("tox")
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*InterpreterNotFound*xyz_unknown_interpreter*",
])
result = cmd.run("tox", "-exyz")
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*unknown*",
])
def test_unknown_interpreter(cmd, initproj):
initproj("interp123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv:python]
basepython=xyz_unknown_interpreter
[testenv]
changedir=tests
'''
})
result = cmd.run("tox")
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*InterpreterNotFound*xyz_unknown_interpreter*",
])
def test_unknown_dep(cmd, initproj):
initproj("dep123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv]
deps=qweqwe123
changedir=tests
'''
})
result = cmd.run("tox", )
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*could not install*qweqwe123*",
])
def test_unknown_environment(cmd, initproj):
initproj("env123-0.7", filedefs={
'tox.ini': ''
})
result = cmd.run("tox", "-e", "qpwoei")
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*unknown*environment*qpwoei*",
])
def test_skip_sdist(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
syntax error
"""
,
'tox.ini': '''
[tox]
skipsdist=True
[testenv]
commands=python -c "print('done')"
'''
})
result = cmd.run("tox", )
assert result.ret == 0
def test_minimal_setup_py_empty(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
"""
,
'tox.ini': ''
})
result = cmd.run("tox", )
assert result.ret == 1
result.stdout.fnmatch_lines([
"*ERROR*empty*",
])
def test_minimal_setup_py_comment_only(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """\n# some comment
"""
,
'tox.ini': ''
})
result = cmd.run("tox", )
assert result.ret == 1
result.stdout.fnmatch_lines([
"*ERROR*empty*",
])
def test_minimal_setup_py_non_functional(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
import sys
"""
,
'tox.ini': ''
})
result = cmd.run("tox", )
assert result.ret == 1
result.stdout.fnmatch_lines([
"*ERROR*check setup.py*",
])
def test_sdist_fails(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
syntax error
"""
,
'tox.ini': '',
})
result = cmd.run("tox", )
assert result.ret
result.stdout.fnmatch_lines([
"*FAIL*could not package project*",
])
def test_package_install_fails(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
from setuptools import setup
setup(
name='pkg123',
description='pkg123 project',
version='0.7',
license='MIT',
platforms=['unix', 'win32'],
packages=['pkg123',],
install_requires=['qweqwe123'],
)
"""
,
'tox.ini': '',
})
result = cmd.run("tox", )
assert result.ret
result.stdout.fnmatch_lines([
"*InvocationError*",
])
class TestToxRun:
@pytest.fixture
def example123(self, initproj):
initproj("example123-0.5", filedefs={
'tests': {'test_hello.py': """
def test_hello(pytestconfig):
pass
""",
},
'tox.ini': '''
[testenv]
changedir=tests
commands= py.test --basetemp={envtmpdir} \
--junitxml=junit-{envname}.xml
deps=pytest
'''
})
def test_toxuone_env(self, cmd, example123):
result = cmd.run("tox")
assert not result.ret
result.stdout.fnmatch_lines([
"*junit-python.xml*",
"*1 passed*",
])
result = cmd.run("tox", "-epython", )
assert not result.ret
result.stdout.fnmatch_lines([
"*1 passed*",
"*summary*",
"*python: commands succeeded"
])
def test_different_config_cwd(self, cmd, example123, monkeypatch):
# see that things work with a different CWD
monkeypatch.chdir(cmd.tmpdir)
result = cmd.run("tox", "-c", "example123/tox.ini")
assert not result.ret
result.stdout.fnmatch_lines([
"*1 passed*",
"*summary*",
"*python: commands succeeded"
])
def test_json(self, cmd, example123):
# see that tests can also fail and retcode is correct
testfile = py.path.local("tests").join("test_hello.py")
assert testfile.check()
testfile.write("def test_fail(): assert 0")
jsonpath = cmd.tmpdir.join("res.json")
result = cmd.run("tox", "--result-json", jsonpath)
assert result.ret == 1
data = json.load(jsonpath.open("r"))
verify_json_report_format(data)
result.stdout.fnmatch_lines([
"*1 failed*",
"*summary*",
"*python: *failed*",
])
def test_develop(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
"""})
result = cmd.run("tox", "-vv", "--develop")
assert not result.ret
assert "sdist-make" not in result.stdout.str()
def test_usedevelop(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
[testenv]
usedevelop=True
"""})
result = cmd.run("tox", "-vv")
assert not result.ret
assert "sdist-make" not in result.stdout.str()
def test_usedevelop_mixed(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
[testenv:devenv]
usedevelop=True
[testenv:nondev]
usedevelop=False
"""})
# running only 'devenv' should not do sdist
result = cmd.run("tox", "-vv", "-e", "devenv")
assert not result.ret
assert "sdist-make" not in result.stdout.str()
# running all envs should do sdist
result = cmd.run("tox", "-vv")
assert not result.ret
assert "sdist-make" in result.stdout.str()
def test_test_usedevelop(cmd, initproj):
initproj("example123-0.5", filedefs={
'tests': {'test_hello.py': """
def test_hello(pytestconfig):
pass
""",
},
'tox.ini': '''
[testenv]
usedevelop=True
changedir=tests
commands=
py.test --basetemp={envtmpdir} --junitxml=junit-{envname}.xml []
deps=pytest
'''
})
result = cmd.run("tox", "-v")
assert not result.ret
result.stdout.fnmatch_lines([
"*junit-python.xml*",
"*1 passed*",
])
assert "sdist-make" not in result.stdout.str()
result = cmd.run("tox", "-epython", )
assert not result.ret
result.stdout.fnmatch_lines([
"*1 passed*",
"*summary*",
"*python: commands succeeded"
])
# see that things work with a different CWD
old = cmd.tmpdir.chdir()
result = cmd.run("tox", "-c", "example123/tox.ini")
assert not result.ret
result.stdout.fnmatch_lines([
"*1 passed*",
"*summary*",
"*python: commands succeeded"
])
old.chdir()
# see that tests can also fail and retcode is correct
testfile = py.path.local("tests").join("test_hello.py")
assert testfile.check()
testfile.write("def test_fail(): assert 0")
result = cmd.run("tox", )
assert result.ret
result.stdout.fnmatch_lines([
"*1 failed*",
"*summary*",
"*python: *failed*",
])
def test_test_piphelp(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
# content of: tox.ini
[testenv]
commands=pip -h
[testenv:py26]
basepython=python
[testenv:py27]
basepython=python
"""})
result = cmd.run("tox")
assert not result.ret
def test_notest(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
# content of: tox.ini
[testenv:py26]
basepython=python
"""})
result = cmd.run("tox", "-v", "--notest")
assert not result.ret
result.stdout.fnmatch_lines([
"*summary*",
"*py26*skipped tests*",
])
result = cmd.run("tox", "-v", "--notest", "-epy26")
assert not result.ret
result.stdout.fnmatch_lines([
"*py26*reusing*",
])
def test_PYC(initproj, cmd, monkeypatch):
initproj("example123", filedefs={'tox.ini': ''})
monkeypatch.setenv("PYTHONDOWNWRITEBYTECODE", 1)
result = cmd.run("tox", "-v", "--notest")
assert not result.ret
result.stdout.fnmatch_lines([
"*create*",
])
def test_env_VIRTUALENV_PYTHON(initproj, cmd, monkeypatch):
initproj("example123", filedefs={'tox.ini': ''})
monkeypatch.setenv("VIRTUALENV_PYTHON", '/FOO')
result = cmd.run("tox", "-v", "--notest")
assert not result.ret, result.stdout.lines
result.stdout.fnmatch_lines([
"*create*",
])
def test_sdistonly(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
"""})
result = cmd.run("tox", "-v", "--sdistonly")
assert not result.ret
result.stdout.fnmatch_lines([
"*sdist-make*setup.py*",
])
assert "virtualenv" not in result.stdout.str()
def test_separate_sdist_no_sdistfile(cmd, initproj):
distshare = cmd.tmpdir.join("distshare")
initproj("pkg123-0.7", filedefs={
'tox.ini': """
[tox]
distshare=%s
""" % distshare
})
result = cmd.run("tox", "--sdistonly")
assert not result.ret
l = distshare.listdir()
assert len(l) == 1
sdistfile = l[0]
def test_separate_sdist(cmd, initproj):
distshare = cmd.tmpdir.join("distshare")
initproj("pkg123-0.7", filedefs={
'tox.ini': """
[tox]
distshare=%s
sdistsrc={distshare}/pkg123-0.7.zip
""" % distshare
})
result = cmd.run("tox", "--sdistonly")
assert not result.ret
l = distshare.listdir()
assert len(l) == 1
sdistfile = l[0]
result = cmd.run("tox", "-v", "--notest")
assert not result.ret
result.stdout.fnmatch_lines([
"*inst*%s*" % sdistfile,
])
def test_sdist_latest(tmpdir, newconfig):
distshare = tmpdir.join("distshare")
config = newconfig([], """
[tox]
distshare=%s
sdistsrc={distshare}/pkg123-*
""" % distshare)
p0 = distshare.ensure("pkg123-1.3.5.zip")
p = distshare.ensure("pkg123-1.4.5.zip")
distshare.ensure("pkg123-1.4.5a1.zip")
session = Session(config)
sdist_path = session.sdist()
assert sdist_path == p
def test_installpkg(tmpdir, newconfig):
p = tmpdir.ensure("pkg123-1.0.zip")
config = newconfig(["--installpkg=%s" % p], "")
session = Session(config)
sdist_path = session.sdist()
assert sdist_path == p
@pytest.mark.xfail("sys.platform == 'win32' and sys.version_info < (2,6)",
reason="test needs better impl")
def test_envsitepackagesdir(cmd, initproj):
initproj("pkg512-0.0.5", filedefs={
'tox.ini': """
[testenv]
commands=
python -c "print(r'X:{envsitepackagesdir}')"
"""})
result = cmd.run("tox")
assert result.ret == 0
result.stdout.fnmatch_lines("""
X:*tox*site-packages*
""")
def verify_json_report_format(data, testenvs=True):
assert data["reportversion"] == "1"
assert data["toxversion"] == tox.__version__
if testenvs:
for envname, envdata in data["testenvs"].items():
for commandtype in ("setup", "test"):
if commandtype not in envdata:
continue
for command in envdata[commandtype]:
assert command["output"]
assert command["retcode"]
pyinfo = envdata["python"]
assert isinstance(pyinfo["version_info"], list)
assert pyinfo["version"]
assert pyinfo["executable"]
|