##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import sys
import functools
import collections
import Gaffer
import GafferUI
QtGui = GafferUI._qtImport( "QtGui" )
## A class for laying out widgets to represent all the plugs held on a particular parent.
#
# Per-plug metadata support :
#
# - "layout:index" controls ordering of plugs within the layout
# - "layout:section" places the plug in a named section of the layout
# - "divider" specifies whether or not a plug should be followed by a divider
# - "layout:widgetType" the class name for the widget type of a particular plug
# - "layout:activator" the name of an activator to control editability
#
# Per-parent metadata support :
#
# - "layout:section:sectionName:summary" dynamic metadata entry returning a
# string to be used as a summary for the section.
# - "layout:section:sectionName:collapsed" boolean indicating whether or
# not a section should be collapsed initially.
#
# Per-node metadata support :
#
# - "layout:activator:activatorName" a dynamic boolean metadata entry to control
# the activation of plugs within the layout
# - "layout:activators" a dynamic metadata entry returning a CompoundData of booleans
# for several named activators.
#
# ## Custom widgets
#
# Custom widgets unassociated with any specific plugs may also be added to plug layouts.
# This can be useful when customising user interfaces for a particular facility - for instance
# to display asset management information for each node.
#
# A custom widget is specified using parent metadata entries starting with
# "layout:customWidget:Name:" prefixes, where "Name" is a unique identifier for the
# custom widget :
#
# - "layout:customWidget:Name:widgetType" specifies a string containing the fully qualified
# name of a python callable which will be used to create the widget. This callable will be passed
# the same parent GraphComponent (node or plug) that the PlugLayout is being created for.
# - "layout:customWidget:Name:*" as for the standard per-plug "layout:*" metadata, so custom
# widgets may be assigned to a section, reordered, given activators etc.
#
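# As a rough illustration (the node, plug and widget names below are hypothetical),
# metadata such as the following could be registered to drive the layout :
#
#	Gaffer.Metadata.registerPlugValue( node["detail"], "layout:section", "Settings.Advanced" )
#	Gaffer.Metadata.registerPlugValue( node["detail"], "layout:index", 0 )
#	Gaffer.Metadata.registerPlugValue( node["detail"], "divider", True )
#	Gaffer.Metadata.registerNodeValue( node, "layout:customWidget:assetInfo:widgetType", "myFacility.AssetInfoWidget" )
#	Gaffer.Metadata.registerNodeValue( node, "layout:customWidget:assetInfo:section", "Settings" )
#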
class PlugLayout( GafferUI.Widget ) :
def __init__( self, parent, orientation = GafferUI.ListContainer.Orientation.Vertical, **kw ) :
assert( isinstance( parent, ( Gaffer.Node, Gaffer.Plug ) ) )
self.__layout = _TabLayout( orientation ) if isinstance( parent, Gaffer.Node ) else _CollapsibleLayout( orientation )
GafferUI.Widget.__init__( self, self.__layout, **kw )
self.__parent = parent
self.__readOnly = False
# we need to connect to the childAdded/childRemoved signals on
# the parent so we can update the ui when plugs are added and removed.
self.__childAddedConnection = parent.childAddedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ) )
self.__childRemovedConnection = parent.childRemovedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ) )
# since our layout is driven by metadata, we must respond dynamically
# to changes in that metadata.
self.__metadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ) )
# and since our activations are driven by plug values, we must respond
# when the plugs are dirtied.
self.__plugDirtiedConnection = self.__node().plugDirtiedSignal().connect( Gaffer.WeakMethod( self.__plugDirtied ) )
# frequently events that trigger a ui update come in batches, so we
# perform the update lazily using a LazyMethod. the dirty variables
# keep track of the work we'll need to do in the update.
self.__layoutDirty = True
self.__activationsDirty = True
self.__summariesDirty = True
# mapping from layout item to widget, where the key is either a plug or
# the name of a custom widget (as returned by layoutOrder()).
self.__widgets = {}
self.__rootSection = _Section( self.__parent )
# schedule our first update, which will take place when we become
# visible for the first time.
self.__updateLazily()
def getReadOnly( self ) :
return self.__readOnly
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
self.__readOnly = readOnly
if self.__readOnly :
for widget in self.__widgets.values() :
self.__applyReadOnly( widget, self.__readOnly )
else :
self.__updateActivations()
## Returns a PlugValueWidget representing the specified child plug.
# Because the layout is built lazily on demand, this might return None due
# to the user not having opened up the ui - in this case lazy=False may
# be passed to force the creation of the ui.
def plugValueWidget( self, childPlug, lazy=True ) :
if not lazy and len( self.__widgets ) == 0 :
self.__update()
w = self.__widgets.get( childPlug, None )
if w is None :
return w
elif isinstance( w, GafferUI.PlugValueWidget ) :
return w
else :
return w.plugValueWidget()
## Returns the custom widget registered with the specified name.
# Because the layout is built lazily on demand, this might return None due
# to the user not having opened up the ui - in this case lazy=False may
# be passed to force the creation of the ui.
def customWidget( self, name, lazy=True ) :
if not lazy and len( self.__widgets ) == 0 :
self.__update()
return self.__widgets.get( name )
## Returns the list of section names that will be used when laying
# out the plugs of the specified parent. The sections are returned
# in the order in which they will be created.
@classmethod
def layoutSections( cls, parent, includeCustomWidgets = False ) :
d = collections.OrderedDict()
for item in cls.layoutOrder( parent, includeCustomWidgets ) :
sectionPath = cls.__staticSectionPath(item, parent)
sectionName = ".".join( sectionPath )
d[sectionName] = 1
return d.keys()
## Returns the child plugs of the parent in the order in which they
# will be laid out, based on "layout:index" Metadata entries. If
# includeCustomWidgets is True, then the positions of custom widgets
# are represented by the appearance of the names of the widgets as
# strings within the list. If a section name is specified, then the
# result will be filtered to include only items in that section.
@classmethod
def layoutOrder( cls, parent, includeCustomWidgets = False, section = None ) :
items = parent.children( Gaffer.Plug )
items = [ plug for plug in items if not plug.getName().startswith( "__" ) ]
if includeCustomWidgets :
if isinstance( parent, Gaffer.Node ) :
metadataNames = Gaffer.Metadata.registeredNodeValues( parent )
else :
metadataNames = Gaffer.Metadata.registeredPlugValues( parent )
for name in metadataNames :
m = re.match( "layout:customWidget:(.+):widgetType", name )
if m :
items.append( m.group( 1 ) )
itemsAndIndices = [ list( x ) for x in enumerate( items ) ]
for itemAndIndex in itemsAndIndices :
index = cls.__staticItemMetadataValue( itemAndIndex[1], "index", parent )
if index is not None :
index = index if index >= 0 else sys.maxint + index
itemAndIndex[0] = index
itemsAndIndices.sort( key = lambda x : x[0] )
if section is not None :
sectionPath = section.split( "." ) if section else []
itemsAndIndices = [ x for x in itemsAndIndices if cls.__staticSectionPath( x[1], parent ) == sectionPath ]
return [ x[1] for x in itemsAndIndices ]
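	# For instance (illustrative only, assuming `node` is any Gaffer.Node with
	# "layout:*" metadata registered) :
	#
	#	PlugLayout.layoutSections( node )                      # e.g. [ "", "Settings", "Settings.Advanced" ]
	#	PlugLayout.layoutOrder( node )                         # plugs sorted by their "layout:index" metadata
	#	PlugLayout.layoutOrder( node, section = "Settings" )   # only the items within that section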
@GafferUI.LazyMethod()
def __updateLazily( self ) :
self.__update()
def __update( self ) :
if self.__layoutDirty :
self.__updateLayout()
self.__layoutDirty = False
if self.__activationsDirty :
self.__updateActivations()
self.__activationsDirty = False
if self.__summariesDirty :
self.__updateSummariesWalk( self.__rootSection )
self.__summariesDirty = False
# delegate to our layout class to create a concrete
# layout from the section definitions.
self.__layout.update( self.__rootSection )
def __updateLayout( self ) :
# get the items to lay out - these are a combination
# of plugs and strings representing custom widgets.
items = self.layoutOrder( self.__parent, includeCustomWidgets = True )
# ditch widgets we don't need any more
itemsSet = set( items )
self.__widgets = { k : v for k, v in self.__widgets.items() if k in itemsSet }
# make (or reuse existing) widgets for each item, and sort them into
# sections.
self.__rootSection.clear()
for item in items :
if item not in self.__widgets :
if isinstance( item, Gaffer.Plug ) :
widget = self.__createPlugWidget( item )
else :
widget = self.__createCustomWidget( item )
self.__widgets[item] = widget
else :
widget = self.__widgets[item]
if widget is None :
continue
section = self.__rootSection
for sectionName in self.__sectionPath( item ) :
section = section.subsection( sectionName )
section.widgets.append( widget )
if self.__itemMetadataValue( item, "divider" ) :
section.widgets.append( GafferUI.Divider(
GafferUI.Divider.Orientation.Horizontal if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Vertical else GafferUI.Divider.Orientation.Vertical
) )
def __updateActivations( self ) :
if self.getReadOnly() :
return
activators = Gaffer.Metadata.nodeValue( self.__node(), "layout:activators" ) or {}
activators = { k : v.value for k, v in activators.items() } # convert CompoundData of BoolData to dict of booleans
for item, widget in self.__widgets.items() :
active = True
activatorName = self.__itemMetadataValue( item, "activator" )
if activatorName :
active = activators.get( activatorName )
if active is None :
active = Gaffer.Metadata.nodeValue( self.__node(), "layout:activator:" + activatorName )
active = active if active is not None else False
activators[activatorName] = active
self.__applyReadOnly( widget, not active )
def __updateSummariesWalk( self, section ) :
section.summary = self.__metadataValue( self.__parent, "layout:section:" + section.fullName + ":summary" ) or ""
for subsection in section.subsections.values() :
self.__updateSummariesWalk( subsection )
def __import( self, path ) :
path = path.split( "." )
result = __import__( path[0] )
for n in path[1:] :
result = getattr( result, n )
return result
def __createPlugWidget( self, plug ) :
widgetType = Gaffer.Metadata.plugValue( plug, "layout:widgetType" )
if widgetType is not None :
if widgetType == "None" :
return None
else :
widgetClass = self.__import( widgetType )
result = widgetClass( plug )
else :
result = GafferUI.PlugValueWidget.create( plug )
if result is None :
return result
if isinstance( result, GafferUI.PlugValueWidget ) and not result.hasLabel() and Gaffer.Metadata.plugValue( plug, "label" ) != "" :
result = GafferUI.PlugWidget( result )
if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Horizontal :
# undo the annoying fixed size the PlugWidget has applied
# to the label.
## \todo Shift all the label size fixing out of PlugWidget and just fix the
# widget here if we're in a vertical orientation.
QWIDGETSIZE_MAX = 16777215 # qt #define not exposed by PyQt or PySide
result.labelPlugValueWidget().label()._qtWidget().setFixedWidth( QWIDGETSIZE_MAX )
self.__applyReadOnly( result, self.getReadOnly() )
return result
def __createCustomWidget( self, name ) :
widgetType = self.__itemMetadataValue( name, "widgetType" )
widgetClass = self.__import( widgetType )
return widgetClass( self.__parent )
def __node( self ) :
return self.__parent if isinstance( self.__parent, Gaffer.Node ) else self.__parent.node()
@classmethod
def __metadataValue( cls, plugOrNode, name ) :
if isinstance( plugOrNode, Gaffer.Node ) :
return Gaffer.Metadata.nodeValue( plugOrNode, name )
else :
return Gaffer.Metadata.plugValue( plugOrNode, name )
@classmethod
def __staticItemMetadataValue( cls, item, name, parent ) :
if isinstance( item, Gaffer.Plug ) :
##\todo Update "divider" and "label" items to use prefix too
if name not in ( "divider", "label" ) :
name = "layout:" + name
return Gaffer.Metadata.plugValue( item, name )
else :
return cls.__metadataValue( parent, "layout:customWidget:" + item + ":" + name )
def __itemMetadataValue( self, item, name ) :
return self.__staticItemMetadataValue( item, name, parent = self.__parent )
@classmethod
def __staticSectionPath( cls, item, parent ) :
m = None
if isinstance( parent, Gaffer.Node ) :
# Backwards compatibility with old metadata entry
## \todo Remove
m = cls.__staticItemMetadataValue( item, "nodeUI:section", parent )
if m == "header" :
m = ""
if m is None :
m = cls.__staticItemMetadataValue( item, "section", parent )
return m.split( "." ) if m else []
def __sectionPath( self, item ) :
return self.__staticSectionPath( item, parent = self.__parent )
def __childAddedOrRemoved( self, *unusedArgs ) :
# typically many children are added and removed at once, so
# we do a lazy update so we can batch up several changes into
# one update once the upheaval is over.
self.__layoutDirty = True
self.__updateLazily()
def __applyReadOnly( self, widget, readOnly ) :
if widget is None :
return
if hasattr( widget, "setReadOnly" ) :
widget.setReadOnly( readOnly )
elif isinstance( widget, GafferUI.PlugWidget ) :
widget.labelPlugValueWidget().setReadOnly( readOnly )
widget.plugValueWidget().setReadOnly( readOnly )
elif hasattr( widget, "plugValueWidget" ) :
widget.plugValueWidget().setReadOnly( readOnly )
def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :
if not self.visible() :
return
if plug is not None and not self.__parent.isSame( plug.parent() ) :
return
if not self.__node().isInstanceOf( nodeTypeId ) :
return
if key in ( "divider", "layout:index", "layout:section" ) :
# we often see sequences of several metadata changes - so
# we schedule a lazy update to batch them into one ui update.
self.__layoutDirty = True
self.__updateLazily()
def __plugDirtied( self, plug ) :
if not self.visible() or plug.direction() != plug.Direction.In :
return
self.__activationsDirty = True
self.__summariesDirty = True
self.__updateLazily()
# The _Section class provides a simple abstract representation of a hierarchical
# layout. Each section contains a list of widgets to be displayed in that section,
# and an OrderedDict of named subsections.
class _Section( object ) :
def __init__( self, _parent, _fullName = "" ) :
self.__parent = _parent
self.fullName = _fullName
self.clear()
def subsection( self, name ) :
result = self.subsections.get( name )
if result is not None :
return result
result = _Section(
self.__parent,
self.fullName + "." + name if self.fullName else name
)
self.subsections[name] = result
return result
def clear( self ) :
self.widgets = []
self.subsections = collections.OrderedDict()
self.summary = ""
def saveState( self, name, value ) :
if isinstance( self.__parent, Gaffer.Node ) :
Gaffer.Metadata.registerNodeValue( self.__parent, self.__stateName( name ), value, persistent = False )
else :
Gaffer.Metadata.registerPlugValue( self.__parent, self.__stateName( name ), value, persistent = False )
def restoreState( self, name ) :
if isinstance( self.__parent, Gaffer.Node ) :
return Gaffer.Metadata.nodeValue( self.__parent, self.__stateName( name ) )
else :
return Gaffer.Metadata.plugValue( self.__parent, self.__stateName( name ) )
def __stateName( self, name ) :
return "layout:section:" + self.fullName + ":" + name
# The PlugLayout class deals with all the details of plugs, metadata and
# signals to define an abstract layout in terms of _Sections. It then
# delegates to the _Layout classes to create an actual layout in terms
# of Widgets. This allows us to present different layouts based on whether
# the parent is a node (tabbed layout) or a plug (collapsible layout).
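# As a minimal sketch (hypothetical, not part of the module), a custom layout
# need only construct a container and implement update() :
#
#	class _FlatLayout( _Layout ) :
#
#		def __init__( self, orientation, **kw ) :
#
#			self.__column = GafferUI.ListContainer( orientation, spacing = 4 )
#			_Layout.__init__( self, self.__column, orientation, **kw )
#
#		def update( self, section ) :
#
#			# ignore subsections and summaries - just show the widgets flat.
#			self.__column[:] = section.widgets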
class _Layout( GafferUI.Widget ) :
def __init__( self, topLevelWidget, orientation, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self.__orientation = orientation
def orientation( self ) :
return self.__orientation
def update( self, section ) :
raise NotImplementedError
class _TabLayout( _Layout ) :
def __init__( self, orientation, **kw ) :
self.__mainColumn = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
_Layout.__init__( self, self.__mainColumn, orientation, **kw )
with self.__mainColumn :
self.__widgetsColumn = GafferUI.ListContainer( self.orientation(), spacing = 4, borderWidth = 4 )
self.__tabbedContainer = GafferUI.TabbedContainer()
self.__currentTabChangedConnection = self.__tabbedContainer.currentChangedSignal().connect(
Gaffer.WeakMethod( self.__currentTabChanged )
)
def update( self, section ) :
self.__section = section
self.__widgetsColumn[:] = section.widgets
existingTabs = collections.OrderedDict()
for tab in self.__tabbedContainer[:] :
existingTabs[self.__tabbedContainer.getLabel( tab )] = tab
updatedTabs = collections.OrderedDict()
for name, subsection in section.subsections.items() :
tab = existingTabs.get( name )
if tab is None :
tab = GafferUI.ScrolledContainer( borderWidth = 8 )
if self.orientation() == GafferUI.ListContainer.Orientation.Vertical :
tab.setHorizontalMode( GafferUI.ScrolledContainer.ScrollMode.Never )
else :
tab.setVerticalMode( GafferUI.ScrolledContainer.ScrollMode.Never )
tab.setChild( _CollapsibleLayout( self.orientation() ) )
tab.getChild().update( subsection )
updatedTabs[name] = tab
if existingTabs.keys() != updatedTabs.keys() :
with Gaffer.BlockedConnection( self.__currentTabChangedConnection ) :
del self.__tabbedContainer[:]
for name, tab in updatedTabs.items() :
self.__tabbedContainer.append( tab, label = name )
if not len( existingTabs ) :
currentTabIndex = self.__section.restoreState( "currentTab" ) or 0
if currentTabIndex < len( self.__tabbedContainer ) :
self.__tabbedContainer.setCurrent( self.__tabbedContainer[currentTabIndex] )
self.__widgetsColumn.setVisible( len( section.widgets ) )
self.__tabbedContainer.setVisible( len( self.__tabbedContainer ) )
def __currentTabChanged( self, tabbedContainer, currentTab ) :
self.__section.saveState( "currentTab", tabbedContainer.index( currentTab ) )
class _CollapsibleLayout( _Layout ) :
def __init__( self, orientation, **kw ) :
self.__column = GafferUI.ListContainer( orientation, spacing = 4 )
_Layout.__init__( self, self.__column, orientation, **kw )
self.__collapsibles = {} # Indexed by section name
def update( self, section ) :
widgets = list( section.widgets )
for name, subsection in section.subsections.items() :
collapsible = self.__collapsibles.get( name )
if collapsible is None :
collapsible = GafferUI.Collapsible( name, _CollapsibleLayout( self.orientation() ), borderWidth = 2, collapsed = True )
collapsible.setCornerWidget( GafferUI.Label(), True )
## \todo This is fighting the default sizing applied in the Label constructor. Really we need a standard
# way of controlling size behaviours for all widgets in the public API.
collapsible.getCornerWidget()._qtWidget().setSizePolicy( QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed )
if subsection.restoreState( "collapsed" ) is False :
collapsible.setCollapsed( False )
collapsible.__stateChangedConnection = collapsible.stateChangedSignal().connect(
functools.partial( Gaffer.WeakMethod( self.__collapsibleStateChanged ), subsection = subsection )
)
self.__collapsibles[name] = collapsible
collapsible.getChild().update( subsection )
collapsible.getCornerWidget().setText(
"<small>" + " ( " + subsection.summary + " )</small>" if subsection.summary else ""
)
widgets.append( collapsible )
self.__column[:] = widgets
def __collapsibleStateChanged( self, collapsible, subsection ) :
subsection.saveState( "collapsed", collapsible.getCollapsed() )
'''Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
'''
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
from .. import backend as K
def random_rotation(x, rg, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
h, w = x.shape[row_index], x.shape[col_index]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_shear(x, intensity, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_barrel_transform(x, intensity):
# TODO
pass
def random_channel_shift(x, intensity, channel_index=0):
x = np.rollaxis(x, channel_index, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x, transform_matrix, channel_index=0, fill_mode='nearest', cval=0.):
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix,
final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
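# A small usage sketch (illustrative; assumes `x` is a (channels, rows, cols)
# float array, i.e. 'th' ordering with channel_index=0). This mirrors what
# random_rotation() does internally :
#
#   theta = np.pi / 180 * 30.  # rotate by 30 degrees
#   rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
#                               [np.sin(theta), np.cos(theta), 0],
#                               [0, 0, 1]])
#   h, w = x.shape[1], x.shape[2]
#   centered_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
#   x_rotated = apply_transform(x, centered_matrix, channel_index=0)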
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, dim_ordering='default', scale=True):
from PIL import Image
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering == 'th':
x = x.transpose(1, 2, 0)
if scale:
x += max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return Image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return Image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise Exception('Unsupported channel number: ', x.shape[2])
def img_to_array(img, dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering not in ['th', 'tf']:
raise Exception('Unknown dim_ordering: ', dim_ordering)
# image has dim_ordering (height, width, channel)
x = np.asarray(img, dtype='float32')
if len(x.shape) == 3:
if dim_ordering == 'th':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if dim_ordering == 'th':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise Exception('Unsupported image shape: ', x.shape)
return x
def load_img(path, grayscale=False, target_size=None):
'''Load an image into PIL format.
# Arguments
path: path to image file
grayscale: boolean
target_size: None (default to original size)
or (img_height, img_width)
'''
from PIL import Image
img = Image.open(path)
if grayscale:
img = img.convert('L')
else:  # Ensure 3 channels even when the loaded image is grayscale
img = img.convert('RGB')
if target_size:
img = img.resize((target_size[1], target_size[0]))
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
return [os.path.join(directory, f) for f in os.listdir(directory)
if os.path.isfile(os.path.join(directory, f)) and re.match('([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
'''Generate minibatches with
real-time data augmentation.
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
zca_whitening: apply ZCA whitening.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channel.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided (before applying
any other transformation).
dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
(the depth) is at index 1, in 'tf' mode it is at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "th".
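# Example
Illustrative usage (X_train and y_train below are placeholder numpy arrays
of images and labels, and `model` is assumed to be an existing Keras model):
    datagen = ImageDataGenerator(
        rotation_range=20,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True)
    # fit() is only needed when featurewise_center,
    # featurewise_std_normalization or zca_whitening is used
    datagen.fit(X_train)
    for X_batch, y_batch in datagen.flow(X_train, y_train, batch_size=32):
        model.train_on_batch(X_batch, y_batch)
        break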
'''
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.__dict__.update(locals())
self.mean = None
self.std = None
self.principal_components = None
self.rescale = rescale
if dim_ordering not in {'tf', 'th'}:
raise Exception('dim_ordering should be "tf" (channel after row and '
'column) or "th" (channel before row and column). '
'Received arg: ', dim_ordering)
self.dim_ordering = dim_ordering
if dim_ordering == 'th':
self.channel_index = 1
self.row_index = 2
self.col_index = 3
if dim_ordering == 'tf':
self.channel_index = 3
self.row_index = 1
self.col_index = 2
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise Exception('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow(self, X, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg'):
return NumpyArrayIterator(
X, y, self,
batch_size=batch_size, shuffle=shuffle, seed=seed,
dim_ordering=self.dim_ordering,
save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg'):
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
dim_ordering=self.dim_ordering,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format)
def standardize(self, x):
if self.rescale:
x *= self.rescale
# x is a single image, so it doesn't have image number at index 0
img_channel_index = self.channel_index - 1
if self.samplewise_center:
x -= np.mean(x, axis=img_channel_index, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, axis=img_channel_index, keepdims=True) + 1e-7)
if self.featurewise_center:
x -= self.mean
if self.featurewise_std_normalization:
x /= (self.std + 1e-7)
if self.zca_whitening:
flatx = np.reshape(x, (x.size))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
return x
def random_transform(self, x):
# x is a single image, so it doesn't have image number at index 0
img_row_index = self.row_index - 1
img_col_index = self.col_index - 1
img_channel_index = self.channel_index - 1
# use composition of homographies to generate final transform that needs to be applied
if self.rotation_range:
theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
if self.height_shift_range:
tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_index]
else:
tx = 0
if self.width_shift_range:
ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_index]
else:
ty = 0
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = np.dot(np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)
h, w = x.shape[img_row_index], x.shape[img_col_index]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_index,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x, self.channel_shift_range, img_channel_index)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_index)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_index)
# TODO:
# channel-wise normalization
# barrel/fisheye
return x
def fit(self, X,
augment=False,
rounds=1,
seed=None):
'''Required for featurewise_center, featurewise_std_normalization
and zca_whitening.
# Arguments
X: Numpy array, the data to fit on.
augment: whether to fit on randomly augmented samples
rounds: if `augment`,
how many augmentation passes to do over the data
seed: random seed.
'''
X = np.copy(X)
if augment:
aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
for r in range(rounds):
for i in range(X.shape[0]):
aX[i + r * X.shape[0]] = self.random_transform(X[i])
X = aX
if self.featurewise_center:
self.mean = np.mean(X, axis=0)
X -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(X, axis=0)
X /= (self.std + 1e-7)
if self.zca_whitening:
flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
U, S, V = linalg.svd(sigma)
self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
class Iterator(object):
def __init__(self, N, batch_size, shuffle, seed):
self.N = N
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(N, batch_size, shuffle, seed)
def reset(self):
self.batch_index = 0
def _flow_index(self, N, batch_size=32, shuffle=False, seed=None):
# ensure self.batch_index is 0
self.reset()
while 1:
if self.batch_index == 0:
index_array = np.arange(N)
if shuffle:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
index_array = np.random.permutation(N)
current_index = (self.batch_index * batch_size) % N
if N >= current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = N - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
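# Illustrative sketch of how _flow_index cycles through the data. With N=5,
# batch_size=2 and shuffle=False, the generator yields
# (index_array, current_index, current_batch_size) tuples :
#
#   it = Iterator(5, 2, False, None)
#   next(it.index_generator)  # (array([0, 1]), 0, 2)
#   next(it.index_generator)  # (array([2, 3]), 2, 2)
#   next(it.index_generator)  # (array([4]), 4, 1)   final, smaller batch
#   next(it.index_generator)  # (array([0, 1]), 0, 2) then it wraps around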
class NumpyArrayIterator(Iterator):
def __init__(self, X, y, image_data_generator,
batch_size=32, shuffle=False, seed=None,
dim_ordering='default',
save_to_dir=None, save_prefix='', save_format='jpeg'):
if y is not None and len(X) != len(y):
raise Exception('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' % (np.asarray(X).shape, np.asarray(y).shape))
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.X = X
self.y = y
self.image_data_generator = image_data_generator
self.dim_ordering = dim_ordering
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(X.shape[0], batch_size, shuffle, seed)
def next(self):
# for python 2.x.
# Keeps under lock only the mechanism which advances
# the indexing of each batch
# see http://anandology.com/blog/using-iterators-and-generators/
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock so it can be done in parallel
batch_x = np.zeros(tuple([current_batch_size] + list(self.X.shape)[1:]))
for i, j in enumerate(index_array):
x = self.X[j]
x = self.image_data_generator.random_transform(x.astype('float32'))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
class DirectoryIterator(Iterator):
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
dim_ordering='default',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg'):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.dim_ordering = dim_ordering
if self.color_mode == 'rgb':
if self.dim_ordering == 'tf':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.dim_ordering == 'tf':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'}
# first, count the number of samples and classes
self.nb_sample = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.nb_class = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
for subdir in classes:
subpath = os.path.join(directory, subdir)
for fname in os.listdir(subpath):
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.nb_sample += 1
print('Found %d images belonging to %d classes.' % (self.nb_sample, self.nb_class))
# second, build an index of the images in the different class subfolders
self.filenames = []
self.classes = np.zeros((self.nb_sample,), dtype='int32')
i = 0
for subdir in classes:
subpath = os.path.join(directory, subdir)
for fname in os.listdir(subpath):
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.classes[i] = self.class_indices[subdir]
self.filenames.append(os.path.join(subdir, fname))
i += 1
super(DirectoryIterator, self).__init__(self.nb_sample, batch_size, shuffle, seed)
def next(self):
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock so it can be done in parallel
batch_x = np.zeros((current_batch_size,) + self.image_shape)
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname), grayscale=grayscale, target_size=self.target_size)
x = img_to_array(img, dim_ordering=self.dim_ordering)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype('float32')
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), self.nb_class), dtype='float32')
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import six.moves.cPickle as pickle
import random
import collections
def save_file(data, filename):
"""
Save data into pickle format.
data: the data to save.
filename: the output filename.
"""
pickle.dump(data, open(filename, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
def save_list(l, outfile):
"""
Save a list of strings into a text file, one line per string.
l: the list of strings to save
outfile: the output file
"""
open(outfile, "w").write("\n".join(l))
def exclude_pattern(f):
"""
Return whether f matches the exclude pattern.
Exclude the files that start with "." or end with "~".
"""
return f.startswith(".") or f.endswith("~")
def list_dirs(path):
"""
Return a list of directories in path. Exclude all the directories that
start with '.'.
path: the base directory to search over.
"""
return [
os.path.join(path, d) for d in next(os.walk(path))[1]
if not exclude_pattern(d)
]
def list_images(path, exts=set(["jpg", "png", "bmp", "jpeg"])):
"""
Return a list of images in path.
path: the base directory to search over.
exts: the extensions of the images to find.
"""
return [os.path.join(path, d) for d in os.listdir(path) \
if os.path.isfile(os.path.join(path, d)) and not exclude_pattern(d)\
and os.path.splitext(d)[-1][1:] in exts]
def list_files(path):
"""
Return a list of files in path.
path: the base directory to search over.
"""
return [os.path.join(path, d) for d in os.listdir(path) \
if os.path.isfile(os.path.join(path, d)) and not exclude_pattern(d)]
def get_label_set_from_dir(path):
"""
Return a dictionary of the labels and label ids from a path.
Assume each directory in the path corresponds to a unique label.
The keys of the dictionary are the label names.
The values of the dictionary are the label ids.
"""
dirs = list_dirs(path)
return dict([(os.path.basename(d), i) for i, d in enumerate(sorted(dirs))])
class Label:
"""
A class of label data.
"""
def __init__(self, label, name):
"""
label: the id of the label.
name: the name of the label.
"""
self.label = label
self.name = name
def convert_to_paddle_format(self):
"""
Convert the label into the paddle batch format.
"""
return int(self.label)
def __hash__(self):
return hash((self.label))
class Dataset:
"""
A class to represent a dataset. A dataset contains a set of items.
Each item contains multiple slots of data.
For example, in an image classification dataset, each item contains two slots:
the first slot is an image, and the second slot is a label.
"""
def __init__(self, data, keys):
"""
data: a list of items.
Each item is a tuple containing multiple slots of data.
Each slot is an object with a convert_to_paddle_format method.
keys: a list of keys, one for each slot.
"""
self.data = data
self.keys = keys
def check_valid(self):
for d in self.data:
assert (len(d) == len(self.keys))
def permute(self, key_id, num_per_batch):
"""
Permute data for batching. It supports two modes now:
1. if key_id is None, the batching process is completely random.
2. if key_id is not None, the batching process permutes the data so that the key
specified by key_id is uniformly distributed across batches. See the comments of
permute_by_key for details.
"""
if key_id is None:
self.uniform_permute()
else:
self.permute_by_key(key_id, num_per_batch)
def uniform_permute(self):
"""
Permute the data randomly.
"""
random.shuffle(self.data)
def permute_by_key(self, key_id, num_per_batch):
"""
Permute the data so that the key specified by key_id is
uniformly distributed across batches.
For example, if we have three labels with 100, 200, and 300 data items
respectively, and the number of batches is 4, then each batch contains
25, 50, and 75 items of these labels, respectively.
"""
# Store the indices of the data that has the key value
# specified by key_id.
keyvalue_indices = collections.defaultdict(list)
for idx in range(len(self.data)):
keyvalue_indices[self.data[idx][key_id].label].append(idx)
for k in keyvalue_indices:
random.shuffle(keyvalue_indices[k])
num_data_per_key_batch = \
math.ceil(num_per_batch / float(len(list(keyvalue_indices.keys()))))
if num_data_per_key_batch < 2:
raise Exception("The number of data in a batch is too small")
permuted_data = []
keyvalue_readpointer = collections.defaultdict(int)
while len(permuted_data) < len(self.data):
for k in keyvalue_indices:
begin_idx = keyvalue_readpointer[k]
end_idx = int(
min(begin_idx + num_data_per_key_batch,
len(keyvalue_indices[k])))
print("begin_idx, end_idx")
print(begin_idx, end_idx)
for idx in range(begin_idx, end_idx):
permuted_data.append(self.data[keyvalue_indices[k][idx]])
keyvalue_readpointer[k] = end_idx
self.data = permuted_data
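# Rough usage sketch (hypothetical; ImageSlot stands in for any slot object
# that implements convert_to_paddle_format, in the same way Label does):
#
#   cat, dog = Label(0, "cat"), Label(1, "dog")
#   data = [(ImageSlot("cat_001.jpg"), cat), (ImageSlot("dog_001.jpg"), dog)]
#   dataset = Dataset(data, keys=["image", "label"])
#   dataset.check_valid()
#   dataset.permute(key_id=1, num_per_batch=5000)  # spread labels evenly across batches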
class DataBatcher:
"""
A class that is used to create batches for both training and testing
datasets.
"""
def __init__(self, train_data, test_data, label_set):
"""
train_data, test_data: Each one is a Dataset object representing
training and testing data, respectively.
label_set: a dictionary storing the mapping from label name to label id.
"""
self.train_data = train_data
self.test_data = test_data
self.label_set = label_set
self.num_per_batch = 5000
assert (self.train_data.keys == self.test_data.keys)
def create_batches_and_list(self, output_path, train_list_name,
test_list_name, label_set_name):
"""
Create batches for both the training and testing datasets.
It also creates train.list and test.list, which list the batch files
for the training and testing data, respectively.
"""
train_list = self.create_batches(self.train_data, output_path, "train_",
self.num_per_batch)
test_list = self.create_batches(self.test_data, output_path, "test_",
self.num_per_batch)
save_list(train_list, os.path.join(output_path, train_list_name))
save_list(test_list, os.path.join(output_path, test_list_name))
save_file(self.label_set, os.path.join(output_path, label_set_name))
def create_batches(self,
data,
output_path,
prefix="",
num_data_per_batch=5000):
"""
Create batches for a Dataset object.
data: the Dataset object to process.
output_path: the output path of the batches.
prefix: the prefix of each batch.
num_data_per_batch: number of data in each batch.
"""
num_batches = int(math.ceil(len(data.data) / float(num_data_per_batch)))
batch_names = []
data.check_valid()
num_slots = len(data.keys)
for i in range(num_batches):
batch_name = os.path.join(output_path, prefix + "batch_%03d" % i)
out_data = dict([(k, []) for k in data.keys])
begin_idx = i * num_data_per_batch
end_idx = min((i + 1) * num_data_per_batch, len(data.data))
for j in range(begin_idx, end_idx):
for slot_id in range(num_slots):
out_data[data.keys[slot_id]].\
append(data.data[j][slot_id].convert_to_paddle_format())
save_file(out_data, batch_name)
batch_names.append(batch_name)
return batch_names
class DatasetCreater(object):
"""
A virtual class for creating datasets.
The derived class needs to implement the following methods:
- create_dataset()
- create_meta_file()
"""
def __init__(self, data_path):
"""
data_path: the path to store the training data and batches.
train_dir_name: relative training data directory.
test_dir_name: relative testing data directory.
batch_dir_name: relative batch directory.
num_per_batch: the number of data in a batch.
meta_filename: the filename of the meta file.
train_list_name: training batch list name.
test_list_name: testing batch list name.
label_set: label set name.
overwrite: whether to overwrite the files if the batches are already in
the given path.
"""
self.data_path = data_path
self.train_dir_name = 'train'
self.test_dir_name = 'test'
self.batch_dir_name = 'batches'
self.num_per_batch = 50000
self.meta_filename = "batches.meta"
self.train_list_name = "train.list"
self.test_list_name = "test.list"
self.label_set_name = "labels.pkl"
self.output_path = os.path.join(self.data_path, self.batch_dir_name)
self.overwrite = False
self.permutate_key = "labels"
self.from_list = False
def create_meta_file(self, data):
"""
Create a meta file from training data.
data: training data given in a Dataset format.
"""
raise NotImplementedError
def create_dataset(self, path):
"""
Create a Dataset object from a path.
If self.from_list is True, it uses a file list to determine the
dataset. Otherwise, it uses the directory structure.
path: the path of the dataset.
return a tuple of the Dataset object and a mapping from label name
to label id.
"""
if self.from_list:
return self.create_dataset_from_list(path)
else:
return self.create_dataset_from_dir(path)
def create_dataset_from_list(self, path):
"""
Create a Dataset object from a path.
It uses a file list to determine the dataset.
path: the path of the dataset.
return a tuple of the Dataset object and a mapping from label name
to label id.
"""
raise NotImplementedError
def create_dataset_from_dir(self, path):
"""
Create a Dataset object from a path.
It uses the directory structure to determine the dataset.
path: the path of the dataset.
return a tuple of the Dataset object and a mapping from label name
to label id.
"""
raise NotImplementedError
def create_batches(self):
"""
create batches and meta file.
"""
train_path = os.path.join(self.data_path, self.train_dir_name)
test_path = os.path.join(self.data_path, self.test_dir_name)
out_path = os.path.join(self.data_path, self.batch_dir_name)
if not os.path.exists(out_path):
os.makedirs(out_path)
if (self.overwrite or not os.path.exists(
os.path.join(out_path, self.train_list_name))):
train_data, train_label_set = \
self.create_dataset(train_path)
test_data, test_label_set = \
self.create_dataset(test_path)
train_data.permute(
self.keys.index(self.permutate_key), self.num_per_batch)
assert (train_label_set == test_label_set)
data_batcher = DataBatcher(train_data, test_data, train_label_set)
data_batcher.num_per_batch = self.num_per_batch
data_batcher.create_batches_and_list(
self.output_path, self.train_list_name, self.test_list_name,
self.label_set_name)
self.num_classes = len(list(train_label_set.keys()))
self.create_meta_file(train_data)
return out_path
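# A minimal sketch of a concrete DatasetCreater (hypothetical, for illustration
# only; a real subclass would also build the image slot objects and define
# self.keys, which create_batches relies on):
#
#   class MyImageDatasetCreater(DatasetCreater):
#       def __init__(self, data_path):
#           DatasetCreater.__init__(self, data_path)
#           self.keys = ["images", "labels"]
#
#       def create_dataset_from_dir(self, path):
#           label_set = get_label_set_from_dir(path)
#           data = []
#           for label_name, label_id in label_set.items():
#               for image_path in list_images(os.path.join(path, label_name)):
#                   image_slot = ...  # some object with convert_to_paddle_format
#                   data.append((image_slot, Label(label_id, label_name)))
#           return Dataset(data, self.keys), label_set
#
#       def create_meta_file(self, data):
#           save_file({"num_classes": self.num_classes},
#                     os.path.join(self.output_path, self.meta_filename))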
"""
Compliance Checker suite runner
"""
import os
import sys
import subprocess
import inspect
import itertools
from operator import itemgetter
from netCDF4 import Dataset
from lxml import etree as ET
from distutils.version import StrictVersion
from compliance_checker.base import fix_return_value, Result, GenericFile
from compliance_checker.cf.cf import CFBaseCheck
from owslib.sos import SensorObservationService
from owslib.swe.sensor.sml import SensorML
from compliance_checker.protocols import opendap, netcdf, cdl
from compliance_checker.base import BaseCheck
from compliance_checker import MemoizedDataset
from collections import defaultdict
import warnings
from urllib.parse import urlparse
from datetime import datetime
import requests
import codecs
import re
import textwrap
from pkg_resources import working_set
# Ensure output is encoded as Unicode when checker output is redirected or piped
if sys.stdout.encoding is None:
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
if sys.stderr.encoding is None:
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
def extract_docstring_summary(docstring):
"""
Returns a dedented docstring without parameter information
:param docstring: A docstring
:type docstring: str
:returns: str
"""
# return a dedented, then indented two spaces docstring with leading and
# trailing whitespace removed.
return re.sub(r'^(?=.)', ' ',
textwrap.dedent(re.split(r'\n\s*:\w', docstring,
flags=re.MULTILINE)[0]).strip(),
flags=re.MULTILINE)
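# Roughly, for a check docstring such as (hypothetical):
#
#     """
#     Checks that the dataset has a title attribute.
#     :param ds: An open netCDF dataset
#     """
#
# everything before the first ":param"-style field is kept, dedented, and then
# re-indented by two spaces, yielding "  Checks that the dataset has a title attribute."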
class CheckSuite(object):
checkers = {} # Base dict of checker names to BaseCheck derived types, override this in your CheckSuite implementation
templates_root = 'compliance_checker' # modify to load alternative Jinja2 templates
def __init__(self, options=None):
self.col_width = 40
self.options = options or {}
@classmethod
def _get_generator_plugins(cls):
"""
Return a list of classes from external plugins that are used to
generate checker classes
"""
if not hasattr(cls, 'suite_generators'):
gens = working_set.iter_entry_points('compliance_checker.generators')
cls.suite_generators = [x.resolve() for x in gens]
return cls.suite_generators
def _print_suites(self, verbose=0):
"""
Prints out available check suites. If the verbose argument is greater
than zero, includes the internal module version number of each check and
also displays "latest" meta-versions.
:param verbose: Integer indicating whether to print verbose output
:type verbose: int
"""
for checker in sorted(self.checkers.keys()):
version = getattr(self.checkers[checker],
'_cc_checker_version', "???")
if verbose > 0:
print(" - {} (v{})".format(checker, version))
elif ':' in checker and not checker.endswith(':latest'): # Skip the "latest" output
print(" - {}".format(checker))
def _print_checker(self, checker_obj):
"""
Prints each available check and a description with an abridged
docstring for a given checker object
:param checker_obj: Checker object on which to operate
:type checker_obj: subclass of compliance_checker.base.BaseChecker
"""
check_functions = self._get_checks(checker_obj,
defaultdict(lambda: None))
for c, _ in check_functions:
print("- {}".format(c.__name__))
if c.__doc__ is not None:
u_doc = c.__doc__
print("\n{}\n".format(extract_docstring_summary(u_doc)))
@classmethod
def add_plugin_args(cls, parser):
"""
Add command line arguments for external plugins that generate checker
classes
"""
for gen in cls._get_generator_plugins():
gen.add_arguments(parser)
@classmethod
def load_generated_checkers(cls, args):
"""
Load checker classes from generator plugins
"""
for gen in cls._get_generator_plugins():
checkers = gen.get_checkers(args)
cls.checkers.update(checkers)
@classmethod
def load_all_available_checkers(cls):
"""
Helper method to retrieve all sub checker classes derived from various
base classes.
"""
cls._load_checkers(working_set
.iter_entry_points('compliance_checker.suites'))
@classmethod
def _load_checkers(cls, checkers):
"""
Loads up checkers in an iterable into the class checkers dict
:param checkers: An iterable containing the checker objects
"""
for c in checkers:
try:
check_obj = c.resolve()
if (hasattr(check_obj, '_cc_spec') and
hasattr(check_obj, '_cc_spec_version')):
check_version_str = ':'.join((check_obj._cc_spec,
check_obj._cc_spec_version))
cls.checkers[check_version_str] = check_obj
# TODO: remove this once all checkers move over to the new
# _cc_spec, _cc_spec_version
else:
# if _cc_spec and _cc_spec_version attributes aren't
# present, fall back to using name attribute
checker_name = (getattr(check_obj, 'name', None) or
getattr(check_obj, '_cc_spec', None))
warnings.warn('Checker for {} should implement both '
'"_cc_spec" and "_cc_spec_version" '
'attributes. "name" attribute is deprecated. '
'Assuming checker is latest version.'.format(checker_name),
DeprecationWarning)
# append "unknown" to version string since no versioning
# info was provided
cls.checkers["{}:unknown".format(checker_name)] = check_obj
except Exception as e:
print("Could not load", c, ":", e, file=sys.stderr)
# find the latest version of versioned checkers and set that as the
# default checker for compliance checker if no version is specified
ver_checkers = sorted([c.split(':', 1) for c
in cls.checkers if ':' in c])
for spec, versions in itertools.groupby(ver_checkers, itemgetter(0)):
version_nums = [v[-1] for v in versions]
try:
latest_version = str(max(StrictVersion(v) for v
in version_nums))
# if the version can't be parsed as a StrictVersion, parse
# according to character collation
except ValueError:
latest_version = max(version_nums)
cls.checkers[spec] = cls.checkers[spec + ':latest'] = \
cls.checkers[':'.join((spec, latest_version))]
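# For example, if "cf:1.6" and "cf:1.7" checkers are registered, both "cf" and
# "cf:latest" end up aliased to the "cf:1.7" checker class, since 1.7 is the
# highest version that parses as a StrictVersion.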
def _get_checks(self, checkclass, skip_checks):
"""
Helper method to retrieve check methods from a Checker class. Excludes
any checks in `skip_checks`.
The names of the methods in the Checker class should start with "check_"
for this method to find them.
"""
meths = inspect.getmembers(checkclass, inspect.isroutine)
# return all check methods not among the skipped checks
returned_checks = []
for fn_name, fn_obj in meths:
if (fn_name.startswith("check_") and
skip_checks[fn_name] != BaseCheck.HIGH):
returned_checks.append((fn_obj, skip_checks[fn_name]))
return returned_checks
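# A minimal standalone sketch of the discovery rule above, using a hypothetical
# checker class: only "check_"-prefixed methods are picked up, and a skip entry
# equal to the HIGH level (assumed here to be 3, as in
# compliance_checker.base.BaseCheck) removes a check entirely.
import inspect as _inspect
from collections import defaultdict as _defaultdict

class _DemoChecker(object):
    def setup(self, ds):
        pass
    def check_title(self, ds):
        return True
    def check_summary(self, ds):
        return True

_skips = _defaultdict(lambda: None, {'check_summary': 3})
_found = []
for _name, _fn in _inspect.getmembers(_DemoChecker, _inspect.isroutine):
    if _name.startswith('check_') and _skips[_name] != 3:
        _found.append((_name, _skips[_name]))
assert _found == [('check_title', None)]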
def _run_check(self, check_method, ds, max_level):
"""
Runs a check and appends a result to the values list.
@param bound method check_method: a given check method
@param netCDF4 dataset ds
@param int max_level: suppress results at or below this check level
@return list: list of Result objects
"""
val = check_method(ds)
if hasattr(val, '__iter__'):
# Handle OrderedDict when we need to modify results in a superclass
# i.e. some checks in CF 1.7 which extend CF 1.6 behaviors
if isinstance(val, dict):
val_iter = val.values()
else:
val_iter = val
check_val = []
for v in val_iter:
res = fix_return_value(v, check_method.__func__.__name__,
check_method, check_method.__self__)
if max_level is None or res.weight > max_level:
check_val.append(res)
return check_val
else:
check_val = fix_return_value(val, check_method.__func__.__name__,
check_method, check_method.__self__)
if max_level is None or check_val.weight > max_level:
return [check_val]
else:
return []
def _get_check_versioned_name(self, check_name):
"""
The compliance checker allows the user to specify a
check without a version number but we want the report
to specify the version number.
Returns the check name with the version number that was actually run
"""
if ':' not in check_name or ':latest' in check_name:
check_name = ':'.join((check_name.split(':')[0],
self.checkers[check_name]._cc_spec_version))
return check_name
def _get_check_url(self, check_name):
"""
Return the check's reference URL if it exists. If not, return an empty string.
@param check_name str: name of the check being run returned by
_get_check_versioned_name()
"""
return getattr(self.checkers[check_name], '_cc_url', '')
def _get_valid_checkers(self, ds, checker_names):
"""
Returns a filtered list of 2-tuples: (name, valid checker) based on the ds object's type and
the user selected names.
"""
assert len(self.checkers) > 0, "No checkers could be found."
if len(checker_names) == 0:
checker_names = list(self.checkers.keys())
args = [(name, self.checkers[name]) for name in checker_names if name in self.checkers]
valid = []
all_checked = set(a[1] for a in args) # only class types
checker_queue = set(args)
while len(checker_queue):
name, a = checker_queue.pop()
# is the current dataset type in the supported filetypes
# for the checker class?
if type(ds) in a().supported_ds:
valid.append((name, a))
# add subclasses of SOS checks
if "ioos_sos" in name:
for subc in a.__subclasses__():
if subc not in all_checked:
all_checked.add(subc)
checker_queue.add((name, subc))
return valid
@classmethod
def _process_skip_checks(cls, skip_checks):
"""
Processes an iterable of skip_checks strings and returns a dict of
<check_name>: <max_skip_level> pairs
"""
check_dict = defaultdict(lambda: None)
# A is for "all", "M" is for medium, "L" is for low
check_lookup = {'A': BaseCheck.HIGH,
'M': BaseCheck.MEDIUM,
'L': BaseCheck.LOW}
for skip_check_spec in skip_checks:
split_check_spec = skip_check_spec.split(':')
check_name = split_check_spec[0]
if len(split_check_spec) < 2:
check_max_level = BaseCheck.HIGH
else:
try:
check_max_level = check_lookup[split_check_spec[1]]
except KeyError:
warnings.warn("Skip specifier '{}' on check '{}' not found,"
" defaulting to skip entire check".format(split_check_spec[1], check_name))
check_max_level = BaseCheck.HIGH
check_dict[check_name] = check_max_level
return check_dict
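# Illustrative sketch of the specifier format handled above: "<check_name>" on
# its own skips the whole check, while "<check_name>:A|M|L" sets the highest
# priority that is still suppressed. The numeric levels mirror BaseCheck
# (HIGH/MEDIUM/LOW, assumed here to be 3/2/1).
def _demo_skip(skip_specs):
    from collections import defaultdict
    lookup = {'A': 3, 'M': 2, 'L': 1}
    out = defaultdict(lambda: None)
    for spec in skip_specs:
        name, _, level = spec.partition(':')
        out[name] = lookup.get(level, 3)
    return out

_demo = _demo_skip(['check_convention_globals', 'check_standard_name:M'])
assert _demo['check_convention_globals'] == 3  # whole check skipped
assert _demo['check_standard_name'] == 2       # MEDIUM and LOW results suppressed
assert _demo['check_not_listed'] is None       # untouched checks run in full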
def run(self, ds, skip_checks, *checker_names):
"""
Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.
"""
ret_val = {}
checkers = self._get_valid_checkers(ds, checker_names)
if skip_checks is not None:
skip_check_dict = CheckSuite._process_skip_checks(skip_checks)
else:
skip_check_dict = defaultdict(lambda: None)
if len(checkers) == 0:
print("No valid checkers found for tests '{}'".format(",".join(checker_names)))
for checker_name, checker_class in checkers:
# TODO: maybe make this a little more reliable than depending on
# a string to determine the type of the checker -- perhaps
# use some kind of checker object with checker type and
# version baked in
checker_type_name = checker_name.split(':')[0]
checker_opts = self.options.get(checker_type_name, set())
# instantiate a Checker object
try:
checker = checker_class(options=checker_opts)
# hacky fix for no options in constructor
except TypeError:
checker = checker_class()
# TODO? : Why is setup(ds) called at all instead of just moving the
# checker setup into the constructor?
# setup method to prep
checker.setup(ds)
checks = self._get_checks(checker, skip_check_dict)
vals = []
errs = {} # check method name -> (exc, traceback)
for c, max_level in checks:
try:
vals.extend(self._run_check(c, ds, max_level))
except Exception as e:
errs[c.__func__.__name__] = (e, sys.exc_info()[2])
# score the results we got back
groups = self.scores(vals)
ret_val[checker_name] = groups, errs
return ret_val
@classmethod
def passtree(cls, groups, limit):
for r in groups:
if r.children:
x = cls.passtree(r.children, limit)
if r.weight >= limit and x is False:
return False
if r.weight >= limit and r.value[0] != r.value[1]:
return False
return True
def build_structure(self, check_name, groups, source_name, limit=1):
'''
Compiles the checks, results and scores into an aggregate structure which looks like:
{
"scored_points": 396,
"low_count": 0,
"possible_points": 400,
"testname": "gliderdac",
"medium_count": 2,
"source_name": ".//rutgers/ru01-20140120T1444/ru01-20140120T1649.nc",
"high_count": 0,
"all_priorities" : [...],
"high_priorities": [...],
"medium_priorities" : [...],
"low_priorities" : [...]
}
@param check_name The test which was run
@param groups List of results from compliance checker
@param source_name Source of the dataset, used for title
'''
aggregates = {}
aggregates['scored_points'] = 0
aggregates['possible_points'] = 0
high_priorities = []
medium_priorities = []
low_priorities = []
all_priorities = []
aggregates['high_count'] = 0
aggregates['medium_count'] = 0
aggregates['low_count'] = 0
def named_function(result):
for child in result.children:
all_priorities.append(child)
named_function(child)
# For each result, bin them into the appropriate category, put them all
# into the all_priorities category and add up the point values
for res in groups:
if res.weight < limit:
continue
# If the result has 0 possible points, then it was not valid for
# this dataset and contains no meaningful information
if res.value[1] == 0:
continue
aggregates['scored_points'] += res.value[0]
aggregates['possible_points'] += res.value[1]
if res.weight == 3:
high_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates['high_count'] += 1
elif res.weight == 2:
medium_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates['medium_count'] += 1
else:
low_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates['low_count'] += 1
all_priorities.append(res)
# Some results have children
# We don't render children inline with the top three tables, but we
# do total the points and display the messages
named_function(res)
aggregates['high_priorities'] = high_priorities
aggregates['medium_priorities'] = medium_priorities
aggregates['low_priorities'] = low_priorities
aggregates['all_priorities'] = all_priorities
aggregates['testname'] = self._get_check_versioned_name(check_name)
aggregates['source_name'] = source_name
aggregates['scoreheader'] = self.checkers[check_name]._cc_display_headers
aggregates['cc_spec_version'] = self.checkers[check_name]._cc_spec_version
aggregates['cc_url'] = self._get_check_url(aggregates['testname'])
return aggregates
def dict_output(self, check_name, groups, source_name, limit):
'''
Builds the results into a JSON-serializable structure and returns it.
@param check_name The test which was run
@param groups List of results from compliance checker
@param source_name Source of the dataset, used for title
@param limit Integer value for limiting output
'''
aggregates = self.build_structure(check_name, groups, source_name, limit)
return self.serialize(aggregates)
def serialize(self, o):
'''
Returns a safe serializable object that can be serialized into JSON.
@param o Python object to serialize
'''
if isinstance(o, (list, tuple)):
return [self.serialize(i) for i in o]
if isinstance(o, dict):
return {k: self.serialize(v) for k, v in o.items()}
if isinstance(o, datetime):
return o.isoformat()
if isinstance(o, Result):
return self.serialize(o.serialize())
return o
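# A standalone sketch of the same recursive strategy: containers are rebuilt,
# datetimes become ISO 8601 strings and anything else passes through unchanged.
# In the method above, a Result additionally contributes its own serialize() output.
from datetime import datetime as _datetime

def _demo_serialize(o):
    if isinstance(o, (list, tuple)):
        return [_demo_serialize(i) for i in o]
    if isinstance(o, dict):
        return {k: _demo_serialize(v) for k, v in o.items()}
    if isinstance(o, _datetime):
        return o.isoformat()
    return o

assert _demo_serialize({'run_at': _datetime(2020, 1, 1), 'scores': (3, 4)}) == \
    {'run_at': '2020-01-01T00:00:00', 'scores': [3, 4]}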
def checker_html_output(self, check_name, groups, source_name, limit):
'''
Renders the HTML output for a single test using Jinja2 and returns it
as a string.
@param check_name The test which was run
@param groups List of results from compliance checker
@param source_name Source of the dataset, used for title
@param limit Integer value for limiting output
'''
from jinja2 import Environment, PackageLoader
self.j2 = Environment(loader=PackageLoader(self.templates_root, 'data/templates'))
template = self.j2.get_template('ccheck.html.j2')
template_vars = self.build_structure(check_name, groups, source_name, limit)
return template.render(**template_vars)
def html_output(self, checkers_html):
'''
Renders the HTML output for multiple tests and returns it as a string.
@param checkers_html List of HTML for single tests as returned by
checker_html_output
'''
# Note: This relies on checker_html_output having been called so that
# self.j2 is initialised
template = self.j2.get_template('ccheck_wrapper.html.j2')
return template.render(checkers=checkers_html)
def get_points(self, groups, limit):
score_list = []
score_only_list = []
for g in groups:
if g.weight >= limit:
score_only_list.append(g.value)
# checks where all pertinent sections passed
all_passed = sum(x[0] == x[1] for x in score_only_list)
out_of = len(score_only_list)
# sorts lists into high/medium/low order
score_list.sort(key=lambda x: x.weight, reverse=True)
return score_list, all_passed, out_of
def standard_output(self, ds, limit, check_name, groups):
"""
Generates the terminal output for the standard report.
Returns the result groups, the number of checks that passed fully, and the
number of checks that were scored, for use by the verbose output routines.
"""
score_list, points, out_of = self.get_points(groups, limit)
issue_count = out_of - points
# Let's add the version number to the check name if it's missing
check_name = self._get_check_versioned_name(check_name)
check_url = self._get_check_url(check_name)
width = 2 * self.col_width
print('\n')
print("-" * width)
print('{:^{width}}'.format("IOOS Compliance Checker Report", width=width))
print('{:^{width}}'.format(check_name, width=width))
print('{:^{width}}'.format(check_url, width=width))
print("-" * width)
if issue_count > 0:
print('{:^{width}}'.format("Corrective Actions", width=width))
plural = '' if issue_count == 1 else 's'
print("{} has {} potential issue{}".format(os.path.basename(ds), issue_count, plural))
return [groups, points, out_of]
def standard_output_generation(self, groups, limit, points, out_of, check):
'''
Generates the Terminal Output
'''
if points < out_of:
self.reasoning_routine(groups, check, priority_flag=limit)
else:
print("All tests passed!")
def reasoning_routine(self, groups, check, priority_flag=3,
_top_level=True):
"""
Prints the failed checks for the given Result groups, organized by priority
@param list groups: the Result groups
@param str check: checker name
@param int priority_flag: indicates the weight of the groups
@param bool _top_level: indicates the level of the group so as to
print out the appropriate header string
"""
sort_fn = lambda x: x.weight
groups_sorted = sorted(groups, key=sort_fn, reverse=True)
# create dict of the groups -> {level: [reasons]}
result = {key: [v for v in valuesiter if v.value[0] != v.value[1]]
for key, valuesiter in itertools.groupby(groups_sorted,
key=sort_fn)}
priorities = self.checkers[check]._cc_display_headers
def process_table(res, check):
"""Recursively calls reasoning_routine to parse out child reasons
from the parent reasons.
@param Result res: Result object
@param str check: checker name"""
issue = res.name
if not res.children:
reasons = res.msgs
else:
child_reasons = self.reasoning_routine(res.children,
check, _top_level=False)
# there shouldn't be messages if there are children
# is this a valid assumption?
reasons = child_reasons
return issue, reasons
# iterate in reverse to the min priority requested;
# the higher the limit, the more lenient the output
proc_strs = ""
for level in range(3, priority_flag - 1, -1):
level_name = priorities.get(level, level)
# print headers
proc_strs = []
# skip any levels that aren't in the result
if level not in result:
continue
# skip any empty result levels
if len(result[level]) > 0:
# only print priority headers at top level, i.e. non-child
# datasets
if _top_level:
width = 2 * self.col_width
print("\n")
print('{:^{width}}'.format(level_name, width=width))
print("-" * width)
data_issues = [process_table(res, check) for res in result[level]]
has_printed = False
for issue, reasons in data_issues:
# if this isn't the first printed issue, add a newline
# separating this and the previous level
if has_printed:
print("")
# join alphabetized reasons together
reason_str = "\n".join('* {}'.format(r) for r in sorted(reasons, key=lambda x: x[0]))
proc_str = "{}\n{}".format(issue, reason_str)
print(proc_str)
proc_strs.append(proc_str)
has_printed = True
return "\n".join(proc_strs)
def process_doc(self, doc):
"""
Attempt to parse an xml string conforming to either an SOS or SensorML
dataset and return the results
"""
xml_doc = ET.fromstring(doc)
if xml_doc.tag == "{http://www.opengis.net/sos/1.0}Capabilities":
ds = SensorObservationService(None, xml=doc)
# SensorObservationService does not store the etree doc root,
# so maybe use monkey patching here for now?
ds._root = xml_doc
elif xml_doc.tag == "{http://www.opengis.net/sensorML/1.0.1}SensorML":
ds = SensorML(xml_doc)
else:
raise ValueError("Unrecognized XML root element: {}".format(xml_doc.tag))
return ds
def generate_dataset(self, cdl_path):
'''
Use ncgen to generate a netCDF file from a .cdl file
Returns the path to the generated netcdf file. If ncgen fails, uses
sys.exit(1) to terminate the program so a long stack trace is not reported
to the user.
:param str cdl_path: Absolute path to cdl file that is used to generate netCDF file
'''
if '.cdl' in cdl_path: # it's possible the filename doesn't have the .cdl extension
ds_str = cdl_path.replace('.cdl', '.nc')
else:
ds_str = cdl_path + '.nc'
# generate netCDF-4 file
iostat = subprocess.run(['ncgen', '-k', 'nc4', '-o', ds_str, cdl_path], stderr=subprocess.PIPE)
if iostat.returncode != 0:
# if not successful, try to create a netCDF classic file
print('netCDF-4 file could not be generated from cdl file with ' +
'message:')
print(iostat.stderr.decode())
print('Trying to create netCDF Classic file instead.')
iostat = subprocess.run(['ncgen', '-k', 'nc3', '-o', ds_str, cdl_path], stderr=subprocess.PIPE)
if iostat.returncode != 0:
# Exit program if neither a netCDF Classic nor a netCDF-4 file
# could be created.
print('netCDF Classic file could not be generated from cdl file ' +
'with message:')
print(iostat.stderr.decode())
sys.exit(1)
return ds_str
def load_dataset(self, ds_str):
"""
Returns a dataset instance for either a netCDF file or an SOS
mapped DS object.
:param str ds_str: URL of the resource to load
"""
# If it's a remote URL load it as a remote resource, otherwise treat it
# as a local resource.
pr = urlparse(ds_str)
if pr.netloc:
return self.load_remote_dataset(ds_str)
return self.load_local_dataset(ds_str)
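# Illustrative sketch: the remote/local split above keys off urlparse's netloc,
# so URLs are routed to load_remote_dataset and plain filesystem paths to
# load_local_dataset.
from urllib.parse import urlparse as _urlparse

assert _urlparse('https://example.com/thredds/dodsC/profile.nc').netloc == 'example.com'
assert _urlparse('/data/profile.nc').netloc == ''
assert _urlparse('profile.cdl').netloc == ''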
def load_remote_dataset(self, ds_str):
'''
Returns a dataset instance for the remote resource, either OPeNDAP or SOS
:param str ds_str: URL to the remote resource
'''
if opendap.is_opendap(ds_str):
return Dataset(ds_str)
else:
# Check whether the HTTP response is XML; if it is, it's likely SOS,
# so we'll attempt to parse the response as SOS
response = requests.get(ds_str, allow_redirects=True)
if 'text/xml' in response.headers['content-type']:
return self.process_doc(response.content)
raise ValueError("Unknown service with content-type: {}".format(response.headers['content-type']))
def load_local_dataset(self, ds_str):
'''
Returns a dataset instance for the local resource
:param ds_str: Path to the resource
'''
if cdl.is_cdl(ds_str):
ds_str = self.generate_dataset(ds_str)
if netcdf.is_netcdf(ds_str):
return MemoizedDataset(ds_str)
# Assume this is just a Generic File if it exists
if os.path.isfile(ds_str):
return GenericFile(ds_str)
raise ValueError("File is an unknown format")
def scores(self, raw_scores):
"""
Transforms raw scores from a single checker into a fully tallied and grouped scoreline.
"""
grouped = self._group_raw(raw_scores)
return grouped
def _group_raw(self, raw_scores, cur=None, level=1):
"""
Internal recursive method to group raw scores into a cascading score summary.
Only top level items are tallied for scores.
@param list raw_scores: list of raw scores (Result objects)
"""
def trim_groups(r):
if isinstance(r.name, tuple) or isinstance(r.name, list):
new_name = r.name[1:]
else:
new_name = []
return Result(r.weight, r.value, new_name, r.msgs)
# CHECK FOR TERMINAL CONDITION: all raw_scores have zero-length names
# @TODO could have a problem here with scalar name, but probably still works
terminal = [len(x.name) for x in raw_scores]
if terminal == [0] * len(raw_scores):
return []
def group_func(r):
"""
Takes a Result object and slices off the first element of its name
if it's a tuple. Otherwise, does nothing to the name. Returns the
Result's name and weight in a tuple to be used for sorting in that
order in a groupby function.
@param Result r
@return tuple (str, int)
"""
if isinstance(r.name, tuple) or isinstance(r.name, list):
if len(r.name) == 0:
retval = ''
else:
retval = r.name[0:1][0]
else:
retval = r.name
return retval, r.weight
# END INTERNAL FUNCS ##########################################
# NOTE until this point, *ALL* Results in raw_scores are
# individual Result objects.
# sort then group by name, then by priority weighting
grouped = itertools.groupby(sorted(raw_scores, key=group_func),
key=group_func)
# NOTE: post-grouping, grouped looks something like
# [(('Global Attributes', 1), <itertools._grouper at 0x7f10982b5390>),
#  (('Global Attributes', 3), <itertools._grouper at 0x7f10982b5438>),
#  (('Not a Global Attr', 1), <itertools._grouper at 0x7f10982b5470>),
#  (('Some Variable', 2), <itertools._grouper at 0x7f10982b5400>)]
ret_val = []
for k, v in grouped: # iterate through the grouped tuples
k = k[0] # slice ("name", weight_val) --> "name"
v = list(v) # from itertools._grouper to list
cv = self._group_raw(list(map(trim_groups, v)), k, level + 1)
if len(cv):
# if this node has children, max weight of children + sum of all the scores
max_weight = max([x.weight for x in cv])
sum_scores = tuple(map(sum, list(zip(*([x.value for x in cv])))))
msgs = []
else:
max_weight = max([x.weight for x in v])
sum_scores = tuple(map(sum, list(zip(*([self._translate_value(x.value) for x in v])))))
msgs = sum([x.msgs for x in v], [])
ret_val.append(Result(name=k, weight=max_weight, value=sum_scores, children=cv, msgs=msgs))
return ret_val
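# A standalone sketch of one grouping pass, using a namedtuple as a stand-in
# for Result: entries whose hierarchical names share a first element are
# grouped by (name, weight) and their (scored, possible) values are summed,
# mirroring what _group_raw does at each level of recursion.
import itertools as _itertools
from collections import namedtuple as _namedtuple

_R = _namedtuple('_R', 'weight value name msgs')
_raw = [_R(3, (1, 1), ('time', 'units'), []),
        _R(3, (0, 1), ('time', 'calendar'), []),
        _R(2, (1, 1), ('lat', 'units'), [])]

def _key(r):
    return (r.name[0], r.weight)

_grouped = {}
for _k, _v in _itertools.groupby(sorted(_raw, key=_key), key=_key):
    _vals = [r.value for r in _v]
    _grouped[_k] = tuple(map(sum, zip(*_vals)))

assert _grouped == {('time', 3): (1, 2), ('lat', 2): (1, 1)}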
def _translate_value(self, val):
"""
Turns shorthand True/False/None checks into full scores (1, 1)/(0, 1)/(0, 0).
Leaves full scores alone.
"""
if val is True:
return (1, 1)
elif val is False:
return (0, 1)
elif val is None:
return (0, 0)
return val
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class AutoscaleSettingsOperations(object):
"""AutoscaleSettingsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2015-04-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-04-01"
self.config = config
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists the autoscale settings for a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AutoscaleSettingResource
:rtype:
~azure.mgmt.monitor.models.AutoscaleSettingResourcePaged[~azure.mgmt.monitor.models.AutoscaleSettingResource]
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.AutoscaleSettingResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AutoscaleSettingResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, autoscale_setting_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates an autoscale setting.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param autoscale_setting_name: The autoscale setting name.
:type autoscale_setting_name: str
:param parameters: Parameters supplied to the operation.
:type parameters: ~azure.mgmt.monitor.models.AutoscaleSettingResource
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AutoscaleSettingResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.monitor.models.AutoscaleSettingResource or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'autoscaleSettingName': self._serialize.url("autoscale_setting_name", autoscale_setting_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'AutoscaleSettingResource')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AutoscaleSettingResource', response)
if response.status_code == 201:
deserialized = self._deserialize('AutoscaleSettingResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
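# A minimal usage sketch, not part of the generated client: these operations
# are normally reached through a MonitorManagementClient from the matching
# azure-mgmt-monitor release rather than instantiated directly. Credentials,
# subscription_id and the resource names below are placeholders.
#
#     from azure.mgmt.monitor import MonitorManagementClient
#
#     client = MonitorManagementClient(credentials, subscription_id)
#     for setting in client.autoscale_settings.list_by_resource_group('my-rg'):
#         print(setting.name)
#     setting = client.autoscale_settings.get('my-rg', 'my-autoscale-setting')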
def delete(
self, resource_group_name, autoscale_setting_name, custom_headers=None, raw=False, **operation_config):
"""Deletes and autoscale setting.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param autoscale_setting_name: The autoscale setting name.
:type autoscale_setting_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'autoscaleSettingName': self._serialize.url("autoscale_setting_name", autoscale_setting_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, resource_group_name, autoscale_setting_name, custom_headers=None, raw=False, **operation_config):
"""Gets an autoscale setting.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param autoscale_setting_name: The autoscale setting name.
:type autoscale_setting_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AutoscaleSettingResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.monitor.models.AutoscaleSettingResource or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'autoscaleSettingName': self._serialize.url("autoscale_setting_name", autoscale_setting_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AutoscaleSettingResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, autoscale_setting_name, autoscale_setting_resource, custom_headers=None, raw=False, **operation_config):
"""Updates an existing AutoscaleSettingsResource. To update other fields
use the CreateOrUpdate method.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param autoscale_setting_name: The autoscale setting name.
:type autoscale_setting_name: str
:param autoscale_setting_resource: Parameters supplied to the
operation.
:type autoscale_setting_resource:
~azure.mgmt.monitor.models.AutoscaleSettingResourcePatch
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AutoscaleSettingResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.monitor.models.AutoscaleSettingResource or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'autoscaleSettingName': self._serialize.url("autoscale_setting_name", autoscale_setting_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(autoscale_setting_resource, 'AutoscaleSettingResourcePatch')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AutoscaleSettingResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
|
|
import unittest
from test import support
from weakref import proxy, ref, WeakSet
import operator
import copy
import string
import os
from random import randrange, shuffle
import sys
import warnings
import collections
from collections import UserString as ustr
import gc
import contextlib
class Foo:
pass
class RefCycle:
def __init__(self):
self.cycle = self
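# Illustrative sketch (not one of the tests below, reusing the WeakSet and gc
# imported above): a WeakSet keeps an element only while some other strong
# reference to it exists, which is why setUp() stores self.items and friends
# on the test case.
class _DemoItem:
    pass

_kept = _DemoItem()
_demo_ws = WeakSet([_kept, _DemoItem()])
gc.collect()                # the unreferenced element is collected ...
assert _kept in _demo_ws    # ... while the strongly referenced one survives
assert len(_demo_ws) == 1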
class TestWeakSet(unittest.TestCase):
def setUp(self):
# need to keep references to them
self.items = [ustr(c) for c in ('a', 'b', 'c')]
self.items2 = [ustr(c) for c in ('x', 'y', 'z')]
self.ab_items = [ustr(c) for c in 'ab']
self.abcde_items = [ustr(c) for c in 'abcde']
self.def_items = [ustr(c) for c in 'def']
self.ab_weakset = WeakSet(self.ab_items)
self.abcde_weakset = WeakSet(self.abcde_items)
self.def_weakset = WeakSet(self.def_items)
self.letters = [ustr(c) for c in string.ascii_letters]
self.s = WeakSet(self.items)
self.d = dict.fromkeys(self.items)
self.obj = ustr('F')
self.fs = WeakSet([self.obj])
def test_methods(self):
weaksetmethods = dir(WeakSet)
for method in dir(set):
if method == 'test_c_api' or method.startswith('_'):
continue
self.assertIn(method, weaksetmethods,
"WeakSet missing method " + method)
def test_new_or_init(self):
self.assertRaises(TypeError, WeakSet, [], 2)
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
self.assertEqual(len(self.fs), 1)
del self.obj
gc.collect() # required for IronPython
self.assertEqual(len(self.fs), 0)
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
# 1 is not weakref'able, but that TypeError is caught by __contains__
self.assertNotIn(1, self.s)
self.assertIn(self.obj, self.fs)
del self.obj
gc.collect() # required for IronPython
self.assertNotIn(ustr('F'), self.fs)
def test_union(self):
u = self.s.union(self.items2)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.items2)
self.assertEqual(self.s, WeakSet(self.items))
self.assertEqual(type(u), WeakSet)
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, list, tuple:
x = WeakSet(self.items + self.items2)
c = C(self.items2)
self.assertEqual(self.s.union(c), x)
del c
self.assertEqual(len(u), len(self.items) + len(self.items2))
self.items2.pop()
gc.collect()
self.assertEqual(len(u), len(self.items) + len(self.items2))
def test_or(self):
i = self.s.union(self.items2)
self.assertEqual(self.s | set(self.items2), i)
self.assertEqual(self.s | frozenset(self.items2), i)
def test_intersection(self):
s = WeakSet(self.letters)
i = s.intersection(self.items2)
for c in self.letters:
self.assertEqual(c in i, c in self.items2 and c in self.letters)
self.assertEqual(s, WeakSet(self.letters))
self.assertEqual(type(i), WeakSet)
for C in set, frozenset, dict.fromkeys, list, tuple:
x = WeakSet([])
self.assertEqual(i.intersection(C(self.items)), x)
self.assertEqual(len(i), len(self.items2))
self.items2.pop()
gc.collect()
self.assertEqual(len(i), len(self.items2))
def test_isdisjoint(self):
self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))
def test_and(self):
i = self.s.intersection(self.items2)
self.assertEqual(self.s & set(self.items2), i)
self.assertEqual(self.s & frozenset(self.items2), i)
def test_difference(self):
i = self.s.difference(self.items2)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.items2)
self.assertEqual(self.s, WeakSet(self.items))
self.assertEqual(type(i), WeakSet)
self.assertRaises(TypeError, self.s.difference, [[]])
def test_sub(self):
i = self.s.difference(self.items2)
self.assertEqual(self.s - set(self.items2), i)
self.assertEqual(self.s - frozenset(self.items2), i)
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.items2)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
self.assertEqual(self.s, WeakSet(self.items))
self.assertEqual(type(i), WeakSet)
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
self.assertEqual(len(i), len(self.items) + len(self.items2))
self.items2.pop()
gc.collect()
self.assertEqual(len(i), len(self.items) + len(self.items2))
def test_xor(self):
i = self.s.symmetric_difference(self.items2)
self.assertEqual(self.s ^ set(self.items2), i)
self.assertEqual(self.s ^ frozenset(self.items2), i)
def test_sub_and_super(self):
self.assertTrue(self.ab_weakset <= self.abcde_weakset)
self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
self.assertTrue(self.abcde_weakset >= self.ab_weakset)
self.assertFalse(self.abcde_weakset <= self.def_weakset)
self.assertFalse(self.abcde_weakset >= self.def_weakset)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_lt(self):
self.assertTrue(self.ab_weakset < self.abcde_weakset)
self.assertFalse(self.abcde_weakset < self.def_weakset)
self.assertFalse(self.ab_weakset < self.ab_weakset)
self.assertFalse(WeakSet() < WeakSet())
def test_gt(self):
self.assertTrue(self.abcde_weakset > self.ab_weakset)
self.assertFalse(self.abcde_weakset > self.def_weakset)
self.assertFalse(self.ab_weakset > self.ab_weakset)
self.assertFalse(WeakSet() > WeakSet())
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
s = WeakSet(Foo() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = WeakSet([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(WeakSet):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_init(self):
s = WeakSet()
s.__init__(self.items)
self.assertEqual(s, self.s)
s.__init__(self.items2)
self.assertEqual(s, WeakSet(self.items2))
self.assertRaises(TypeError, s.__init__, s, 2);
self.assertRaises(TypeError, s.__init__, 1);
def test_constructor_identity(self):
s = WeakSet(self.items)
t = WeakSet(s)
self.assertNotEqual(id(s), id(t))
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, WeakSet([]))
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
def test_add(self):
x = ustr('Q')
self.s.add(x)
self.assertIn(x, self.s)
dup = self.s.copy()
self.s.add(x)
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
self.fs.add(Foo())
gc.collect() # required for IronPython
self.assertTrue(len(self.fs) == 1)
self.fs.add(self.obj)
self.assertTrue(len(self.fs) == 1)
def test_remove(self):
x = ustr('a')
self.s.remove(x)
self.assertNotIn(x, self.s)
self.assertRaises(KeyError, self.s.remove, x)
self.assertRaises(TypeError, self.s.remove, [])
def test_discard(self):
a, q = ustr('a'), ustr('Q')
self.s.discard(a)
self.assertNotIn(a, self.s)
self.s.discard(q)
self.assertRaises(TypeError, self.s.discard, [])
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.items2)
self.assertEqual(retval, None)
for c in (self.items + self.items2):
self.assertIn(c, self.s)
self.assertRaises(TypeError, self.s.update, [[]])
def test_update_set(self):
self.s.update(set(self.items2))
for c in (self.items + self.items2):
self.assertIn(c, self.s)
def test_ior(self):
self.s |= set(self.items2)
for c in (self.items + self.items2):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.items2)
self.assertEqual(retval, None)
for c in (self.items + self.items2):
if c in self.items2 and c in self.items:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(TypeError, self.s.intersection_update, [[]])
def test_iand(self):
self.s &= set(self.items2)
for c in (self.items + self.items2):
if c in self.items2 and c in self.items:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.items2)
self.assertEqual(retval, None)
for c in (self.items + self.items2):
if c in self.items and c not in self.items2:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
def test_isub(self):
self.s -= set(self.items2)
for c in (self.items + self.items2):
if c in self.items and c not in self.items2:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.items2)
self.assertEqual(retval, None)
for c in (self.items + self.items2):
if (c in self.items) ^ (c in self.items2):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
def test_ixor(self):
self.s ^= set(self.items2)
for c in (self.items + self.items2):
if (c in self.items) ^ (c in self.items2):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, WeakSet())
t = self.s.copy()
t ^= t
self.assertEqual(t, WeakSet())
def test_eq(self):
# issue 5964
self.assertTrue(self.s == self.s)
self.assertTrue(self.s == WeakSet(self.items))
self.assertFalse(self.s == set(self.items))
self.assertFalse(self.s == list(self.items))
self.assertFalse(self.s == tuple(self.items))
self.assertFalse(self.s == WeakSet([Foo]))
self.assertFalse(self.s == 1)
def test_ne(self):
self.assertTrue(self.s != set(self.items))
s1 = WeakSet()
s2 = WeakSet()
self.assertFalse(s1 != s2)
def test_weak_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
# Create new items to be sure no-one else holds a reference
items = [ustr(c) for c in ('a', 'b', 'c')]
s = WeakSet(items)
it = iter(s)
next(it) # Trigger internal iteration
# Destroy an item
del items[-1]
gc.collect() # just in case
# We have removed either the first consumed item, or another one
self.assertIn(len(list(it)), [len(items), len(items) - 1])
del it
gc.collect() # required for IronPython
# The removal has been committed
self.assertEqual(len(s), len(items))
def test_weak_destroy_and_mutate_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
items = [ustr(c) for c in string.ascii_letters]
s = WeakSet(items)
@contextlib.contextmanager
def testcontext():
try:
it = iter(s)
# Start iterator
yielded = ustr(str(next(it)))
# Schedule an item for removal and recreate it
u = ustr(str(items.pop()))
if yielded == u:
# The iterator still has a reference to the removed item,
# advance it (issue #20006).
next(it)
gc.collect() # just in case
yield u
finally:
it = None # should commit all removals
with testcontext() as u:
self.assertNotIn(u, s)
with testcontext() as u:
self.assertRaises(KeyError, s.remove, u)
self.assertNotIn(u, s)
with testcontext() as u:
s.add(u)
self.assertIn(u, s)
t = s.copy()
with testcontext() as u:
s.update(t)
self.assertEqual(len(s), len(t))
with testcontext() as u:
s.clear()
self.assertEqual(len(s), 0)
def test_len_cycles(self):
N = 20
items = [RefCycle() for i in range(N)]
s = WeakSet(items)
del items
it = iter(s)
try:
next(it)
except StopIteration:
pass
gc.collect()
n1 = len(s)
del it
gc.collect()
n2 = len(s)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_len_race(self):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
s = WeakSet(items)
del items
# All items will be collected at next garbage collection pass
it = iter(s)
try:
next(it)
except StopIteration:
pass
n1 = len(s)
del it
n2 = len(s)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_main(verbose=None):
support.run_unittest(TestWeakSet)
if __name__ == "__main__":
test_main(verbose=True)
|
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The OpenTelemetry tracing API describes the classes used to generate
distributed traces.
The :class:`.Tracer` class controls access to the execution context, and
manages span creation. Each operation in a trace is represented by a
:class:`.Span`, which records the start time, end time, and metadata associated with
the operation.
This module provides abstract (i.e. unimplemented) classes required for
tracing, and a concrete no-op :class:`.NonRecordingSpan` that allows applications
to use the API package alone without a supporting implementation.
To get a tracer, you need to provide the package name from which you are
calling the tracer APIs to OpenTelemetry by calling `TracerProvider.get_tracer`
with the calling module name and the version of your package.
The tracer supports creating spans that are "attached" or "detached" from the
context. New spans are "attached" to the context in that they are
created as children of the currently active span, and the newly-created span
can optionally become the new active span::
from opentelemetry import trace
tracer = trace.get_tracer(__name__)
# Create a new root span, set it as the current span in context
with tracer.start_as_current_span("parent"):
# Attach a new child and update the current span
with tracer.start_as_current_span("child"):
do_work()
# Close child span, set parent as current
# Close parent span, set default span as current
When creating a span that's "detached" from the context the active span doesn't
change, and the caller is responsible for managing the span's lifetime::
# Explicit parent span assignment is done via the Context
from opentelemetry.trace import set_span_in_context
context = set_span_in_context(parent)
child = tracer.start_span("child", context=context)
try:
do_work(span=child)
finally:
child.end()
Applications should generally use a single global TracerProvider, and use
either implicit or explicit context propagation consistently throughout.
.. versionadded:: 0.1.0
.. versionchanged:: 0.3.0
`TracerProvider` was introduced and the global ``tracer`` getter was
replaced by ``tracer_provider``.
.. versionchanged:: 0.5.0
``tracer_provider`` was replaced by `get_tracer_provider`,
``set_preferred_tracer_provider_implementation`` was replaced by
`set_tracer_provider`.
"""
import os
import typing
from abc import ABC, abstractmethod
from contextlib import contextmanager
from enum import Enum
from logging import getLogger
from typing import Iterator, Optional, Sequence, cast
from deprecated import deprecated
from opentelemetry import context as context_api
from opentelemetry.attributes import BoundedAttributes # type: ignore
from opentelemetry.context.context import Context
from opentelemetry.environment_variables import OTEL_PYTHON_TRACER_PROVIDER
from opentelemetry.trace.propagation import (
_SPAN_KEY,
get_current_span,
set_span_in_context,
)
from opentelemetry.trace.span import (
DEFAULT_TRACE_OPTIONS,
DEFAULT_TRACE_STATE,
INVALID_SPAN,
INVALID_SPAN_CONTEXT,
INVALID_SPAN_ID,
INVALID_TRACE_ID,
NonRecordingSpan,
Span,
SpanContext,
TraceFlags,
TraceState,
format_span_id,
format_trace_id,
)
from opentelemetry.trace.status import Status, StatusCode
from opentelemetry.util import types
from opentelemetry.util._once import Once
from opentelemetry.util._providers import _load_provider
logger = getLogger(__name__)
class _LinkBase(ABC):
def __init__(self, context: "SpanContext") -> None:
self._context = context
@property
def context(self) -> "SpanContext":
return self._context
@property
@abstractmethod
def attributes(self) -> types.Attributes:
pass
class Link(_LinkBase):
"""A link to a `Span`. The attributes of a Link are immutable.
Args:
context: `SpanContext` of the `Span` to link to.
attributes: Link's attributes.
"""
def __init__(
self,
context: "SpanContext",
attributes: types.Attributes = None,
) -> None:
super().__init__(context)
self._attributes = BoundedAttributes(
attributes=attributes
) # type: types.Attributes
@property
def attributes(self) -> types.Attributes:
return self._attributes
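# Illustrative sketch (uses the imports at the top of this module): a Link
# simply pairs a SpanContext with read-only attributes; INVALID_SPAN_CONTEXT
# stands in for a real remote context here.
_demo_link = Link(INVALID_SPAN_CONTEXT, {"peer.service": "demo-backend"})
assert _demo_link.context is INVALID_SPAN_CONTEXT
assert _demo_link.attributes["peer.service"] == "demo-backend"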
_Links = Optional[Sequence[Link]]
class SpanKind(Enum):
"""Specifies additional details on how this span relates to its parent span.
Note that this enumeration is experimental and likely to change. See
https://github.com/open-telemetry/opentelemetry-specification/pull/226.
"""
#: Default value. Indicates that the span is used internally in the
# application.
INTERNAL = 0
#: Indicates that the span describes an operation that handles a remote
# request.
SERVER = 1
#: Indicates that the span describes a request to some remote service.
CLIENT = 2
#: Indicates that the span describes a producer sending a message to a
#: broker. Unlike client and server, there is usually no direct critical
#: path latency relationship between producer and consumer spans.
PRODUCER = 3
#: Indicates that the span describes a consumer receiving a message from a
#: broker. Unlike client and server, there is usually no direct critical
#: path latency relationship between producer and consumer spans.
CONSUMER = 4
class TracerProvider(ABC):
@abstractmethod
def get_tracer(
self,
instrumenting_module_name: str,
instrumenting_library_version: typing.Optional[str] = None,
schema_url: typing.Optional[str] = None,
) -> "Tracer":
"""Returns a `Tracer` for use by the given instrumentation library.
For any two calls it is undefined whether the same or different
`Tracer` instances are returned, even for different library names.
This function may return different `Tracer` types (e.g. a no-op tracer
vs. a functional tracer).
Args:
instrumenting_module_name: The name of the instrumenting module.
``__name__`` may not be used as this can result in
different tracer names if the tracers are in different files.
It is better to use a fixed string that can be imported where
needed and used consistently as the name of the tracer.
This should *not* be the name of the module that is
instrumented but the name of the module doing the instrumentation.
E.g., instead of ``"requests"``, use
``"opentelemetry.instrumentation.requests"``.
instrumenting_library_version: Optional. The version string of the
instrumenting library. Usually this should be the same as
``pkg_resources.get_distribution(instrumenting_library_name).version``.
schema_url: Optional. Specifies the Schema URL of the emitted telemetry.
"""
class NoOpTracerProvider(TracerProvider):
"""The default TracerProvider, used when no implementation is available.
All operations are no-op.
"""
def get_tracer(
self,
instrumenting_module_name: str,
instrumenting_library_version: typing.Optional[str] = None,
schema_url: typing.Optional[str] = None,
) -> "Tracer":
# pylint:disable=no-self-use,unused-argument
return NoOpTracer()
@deprecated(version="1.9.0", reason="You should use NoOpTracerProvider") # type: ignore
class _DefaultTracerProvider(NoOpTracerProvider):
"""The default TracerProvider, used when no implementation is available.
All operations are no-op.
"""
class ProxyTracerProvider(TracerProvider):
def get_tracer(
self,
instrumenting_module_name: str,
instrumenting_library_version: typing.Optional[str] = None,
schema_url: typing.Optional[str] = None,
) -> "Tracer":
if _TRACER_PROVIDER:
return _TRACER_PROVIDER.get_tracer(
instrumenting_module_name,
instrumenting_library_version,
schema_url,
)
return ProxyTracer(
instrumenting_module_name,
instrumenting_library_version,
schema_url,
)
class Tracer(ABC):
"""Handles span creation and in-process context propagation.
This class provides methods for manipulating the context, creating spans,
and controlling spans' lifecycles.
"""
@abstractmethod
def start_span(
self,
name: str,
context: Optional[Context] = None,
kind: SpanKind = SpanKind.INTERNAL,
attributes: types.Attributes = None,
links: _Links = None,
start_time: Optional[int] = None,
record_exception: bool = True,
set_status_on_exception: bool = True,
) -> "Span":
"""Starts a span.
Create a new span. Start the span without setting it as the current
span in the context. To start the span and use the context in a single
method, see :meth:`start_as_current_span`.
By default the current span in the context will be used as parent, but an
explicit context can also be specified, by passing in a `Context` containing
a current `Span`. If there is no current span in the global `Context` or in
the specified context, the created span will be a root span.
The span can be used as a context manager. On exiting the context manager,
the span's end() method will be called.
Example::
# trace.get_current_span() will be used as the implicit parent.
# If none is found, the created span will be a root instance.
with tracer.start_span("one") as child:
child.add_event("child's event")
Args:
name: The name of the span to be created.
context: An optional Context containing the span's parent. Defaults to the
global context.
kind: The span's kind (relationship to parent). Note that this is
meaningful even if there is no parent.
attributes: The span's attributes.
links: Links span to other spans
start_time: Sets the start time of a span
record_exception: Whether to record any exceptions raised within the
context as error event on the span.
set_status_on_exception: Only relevant if the returned span is used
in a with/context manager. Defines whether the span status will
be automatically set to ERROR when an uncaught exception is
raised in the span with block. The span status won't be set by
this mechanism if it was previously set manually.
Returns:
The newly-created span.
"""
@contextmanager
@abstractmethod
def start_as_current_span(
self,
name: str,
context: Optional[Context] = None,
kind: SpanKind = SpanKind.INTERNAL,
attributes: types.Attributes = None,
links: _Links = None,
start_time: Optional[int] = None,
record_exception: bool = True,
set_status_on_exception: bool = True,
end_on_exit: bool = True,
) -> Iterator["Span"]:
"""Context manager for creating a new span and set it
as the current span in this tracer's context.
Exiting the context manager will call the span's end method,
as well as return the current span to its previous value by
returning to the previous context.
Example::
with tracer.start_as_current_span("one") as parent:
parent.add_event("parent's event")
with tracer.start_as_current_span("two") as child:
child.add_event("child's event")
trace.get_current_span() # returns child
trace.get_current_span() # returns parent
trace.get_current_span() # returns previously active span
This is a convenience method for creating spans attached to the
tracer's context. Applications that need more control over the span
lifetime should use :meth:`start_span` instead. For example::
with tracer.start_as_current_span(name) as span:
do_work()
is equivalent to::
span = tracer.start_span(name)
with opentelemetry.trace.use_span(span, end_on_exit=True):
do_work()
Args:
name: The name of the span to be created.
context: An optional Context containing the span's parent. Defaults to the
global context.
kind: The span's kind (relationship to parent). Note that this is
meaningful even if there is no parent.
attributes: The span's attributes.
links: Links span to other spans
start_time: Sets the start time of a span
record_exception: Whether to record any exceptions raised within the
context as error event on the span.
set_status_on_exception: Only relevant if the returned span is used
in a with/context manager. Defines whether the span status will
be automatically set to ERROR when an uncaught exception is
raised in the span with block. The span status won't be set by
this mechanism if it was previously set manually.
end_on_exit: Whether to end the span automatically when leaving the
context manager.
Yields:
The newly-created span.
"""
class ProxyTracer(Tracer):
# pylint: disable=W0222,signature-differs
def __init__(
self,
instrumenting_module_name: str,
instrumenting_library_version: typing.Optional[str] = None,
schema_url: typing.Optional[str] = None,
):
self._instrumenting_module_name = instrumenting_module_name
self._instrumenting_library_version = instrumenting_library_version
self._schema_url = schema_url
self._real_tracer: Optional[Tracer] = None
self._noop_tracer = NoOpTracer()
@property
def _tracer(self) -> Tracer:
if self._real_tracer:
return self._real_tracer
if _TRACER_PROVIDER:
self._real_tracer = _TRACER_PROVIDER.get_tracer(
self._instrumenting_module_name,
self._instrumenting_library_version,
self._schema_url,
)
return self._real_tracer
return self._noop_tracer
def start_span(self, *args, **kwargs) -> Span: # type: ignore
return self._tracer.start_span(*args, **kwargs) # type: ignore
def start_as_current_span(self, *args, **kwargs) -> Span: # type: ignore
return self._tracer.start_as_current_span(*args, **kwargs) # type: ignore
class NoOpTracer(Tracer):
"""The default Tracer, used when no Tracer implementation is available.
All operations are no-op.
"""
def start_span(
self,
name: str,
context: Optional[Context] = None,
kind: SpanKind = SpanKind.INTERNAL,
attributes: types.Attributes = None,
links: _Links = None,
start_time: Optional[int] = None,
record_exception: bool = True,
set_status_on_exception: bool = True,
) -> "Span":
# pylint: disable=unused-argument,no-self-use
return INVALID_SPAN
@contextmanager
def start_as_current_span(
self,
name: str,
context: Optional[Context] = None,
kind: SpanKind = SpanKind.INTERNAL,
attributes: types.Attributes = None,
links: _Links = None,
start_time: Optional[int] = None,
record_exception: bool = True,
set_status_on_exception: bool = True,
end_on_exit: bool = True,
) -> Iterator["Span"]:
# pylint: disable=unused-argument,no-self-use
yield INVALID_SPAN
@deprecated(version="1.9.0", reason="You should use NoOpTracer") # type: ignore
class _DefaultTracer(NoOpTracer):
"""The default Tracer, used when no Tracer implementation is available.
All operations are no-op.
"""
_TRACER_PROVIDER_SET_ONCE = Once()
_TRACER_PROVIDER: Optional[TracerProvider] = None
_PROXY_TRACER_PROVIDER = ProxyTracerProvider()
def get_tracer(
instrumenting_module_name: str,
instrumenting_library_version: typing.Optional[str] = None,
tracer_provider: Optional[TracerProvider] = None,
schema_url: typing.Optional[str] = None,
) -> "Tracer":
"""Returns a `Tracer` for use by the given instrumentation library.
This function is a convenience wrapper for
opentelemetry.trace.TracerProvider.get_tracer.
If tracer_provider is omitted, the currently configured one is used.
"""
if tracer_provider is None:
tracer_provider = get_tracer_provider()
return tracer_provider.get_tracer(
instrumenting_module_name, instrumenting_library_version, schema_url
)
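# Usage sketch (illustrative only, not part of this module's API): an
# instrumenting library typically requests a tracer once, keyed by its module
# name. The library name and version below are placeholders.
def _example_get_tracer() -> "Tracer":
    # Resolves against the globally configured provider, or against the proxy
    # provider if none has been set yet.
    return get_tracer("my.instrumenting.library", "0.1.0")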
def _set_tracer_provider(tracer_provider: TracerProvider, log: bool) -> None:
def set_tp() -> None:
global _TRACER_PROVIDER # pylint: disable=global-statement
_TRACER_PROVIDER = tracer_provider
did_set = _TRACER_PROVIDER_SET_ONCE.do_once(set_tp)
if log and not did_set:
logger.warning("Overriding of current TracerProvider is not allowed")
def set_tracer_provider(tracer_provider: TracerProvider) -> None:
"""Sets the current global :class:`~.TracerProvider` object.
This can only be done once; a warning will be logged if any further attempt
is made.
"""
_set_tracer_provider(tracer_provider, log=True)
def get_tracer_provider() -> TracerProvider:
"""Gets the current global :class:`~.TracerProvider` object."""
if _TRACER_PROVIDER is None:
# if a global tracer provider has not been set either via code or env
# vars, return a proxy tracer provider
if OTEL_PYTHON_TRACER_PROVIDER not in os.environ:
return _PROXY_TRACER_PROVIDER
tracer_provider: TracerProvider = _load_provider(
OTEL_PYTHON_TRACER_PROVIDER, "tracer_provider"
)
_set_tracer_provider(tracer_provider, log=False)
# _TRACER_PROVIDER will have been set by one thread
return cast("TracerProvider", _TRACER_PROVIDER)
@contextmanager
def use_span(
span: Span,
end_on_exit: bool = False,
record_exception: bool = True,
set_status_on_exception: bool = True,
) -> Iterator[Span]:
"""Takes a non-active span and activates it in the current context.
Args:
span: The span that should be activated in the current context.
end_on_exit: Whether to end the span automatically when leaving the
context manager scope.
record_exception: Whether to record any exceptions raised within the
context as error event on the span.
set_status_on_exception: Only relevant if the returned span is used
in a with/context manager. Defines whether the span status will
be automatically set to ERROR when an uncaught exception is
raised in the span with block. The span status won't be set by
this mechanism if it was previously set manually.
"""
try:
token = context_api.attach(context_api.set_value(_SPAN_KEY, span))
try:
yield span
finally:
context_api.detach(token)
except Exception as exc: # pylint: disable=broad-except
if isinstance(span, Span) and span.is_recording():
# Record the exception as an event
if record_exception:
span.record_exception(exc)
# Set status in case exception was raised
if set_status_on_exception:
span.set_status(
Status(
status_code=StatusCode.ERROR,
description=f"{type(exc).__name__}: {exc}",
)
)
raise
finally:
if end_on_exit:
span.end()
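# Usage sketch (illustrative only, not exported): start_span() creates a span
# without activating it; use_span() then makes it current for a block of work
# and, with end_on_exit=True, ends it when the block exits. The names
# "my.instrumenting.library" and "do-work" are placeholders.
def _example_use_span() -> None:
    tracer = get_tracer("my.instrumenting.library")
    span = tracer.start_span("do-work")
    with use_span(span, end_on_exit=True):
        # Code here sees `span` via get_current_span().
        pass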
__all__ = [
"DEFAULT_TRACE_OPTIONS",
"DEFAULT_TRACE_STATE",
"INVALID_SPAN",
"INVALID_SPAN_CONTEXT",
"INVALID_SPAN_ID",
"INVALID_TRACE_ID",
"NonRecordingSpan",
"Link",
"Span",
"SpanContext",
"SpanKind",
"TraceFlags",
"TraceState",
"TracerProvider",
"Tracer",
"format_span_id",
"format_trace_id",
"get_current_span",
"get_tracer",
"get_tracer_provider",
"set_tracer_provider",
"set_span_in_context",
"use_span",
"Status",
"StatusCode",
]
|
|
import datetime
import errno
from flickrapi import FlickrAPI, FlickrError
from flickrauth import API_KEY, API_SECRET
from lxml import etree
from os import makedirs
from os.path import abspath, exists, isdir, isfile, join, splitext
from PIL import Image, ImageCms
import re
import simplejson as json
import sys
import urllib2
RX_FLICKR_URL = re.compile('^(http:\/\/www\.flickr\.com\/photos\/[^\/]+\/)(\d+)\/?$')
RX_FLICRK_URL_SET = re.compile('^(http:\/\/www\.flickr\.com\/photos\/[^\/]+\/)(\d+)\/in\/set-(\d+)\/?$')
RX_PLEIADES_URL = re.compile('^(http:\/\/pleiades\.stoa\.org\/places\/)(\d+)\/?.*$')
PROFILE_SRGB = 'awibmanager/icc/sRGB_IEC61966-2-1_black_scaled.icc'
fields = {
'copyright' :
("//info[@type='isaw']/copyright-holder", "copyright", "'%s'"),
'title' :
("//info[@type='isaw']/title", 'Headline', "'%s'"),
'keywords' : ("//info[@type='isaw']/typology/keyword", "keywords", "'%s'", ('sequence',)),
'description' : ("//info[@type='isaw']/description", '', "%s"),
'date' :
("//info[@type='isaw']/date-photographed | //info[@type='isaw']/date-scanned", "CreateDate", "'%s 00:00:01'", ('replace', '-', ':')),
'creator' :
("//info[@type='isaw']/photographer", "creator", "'%s'", ('alltext',)),
'ancient' : ("//info[@type='isaw']/geography/photographed-place/ancient-name", '', "%s"),
'modern' : ("//info[@type='isaw']/geography/photographed-place/modern-name", '', "%s"),
'uri' : ("//info[@type='isaw']/geography/photographed-place/uri", '', "%s"),
'fda' : ("//info[@type='isaw']/fda-handle", '', "%s"),
'authority' : ("//info[@type='isaw']/authority", '', "%s") }
SPACEPATTERN = u'\s+'
def normalizetext(source):
""" Condense arbitrary spans of spaces and newlines in a unicode string down
to a single space. Returns a unicode string. """
#return u' '.join(source.replace(u'\n', u' ').strip().split()).strip()
rex = re.compile(SPACEPATTERN, re.UNICODE)
return rex.sub(u' ', source).strip()
def getalltext(elem):
"""Create a document-ordered string from all text nodes in an XML element and its child nodes"""
text = elem.text or ""
for e in elem:
text += getalltext(e)
if e.tail:
text += e.tail
return text
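# Illustrative sketch of the two helpers above (toy input, not used elsewhere):
def _example_text_helpers():
    elem = etree.fromstring('<p>Now   is\nthe <b>time</b>.</p>')
    flat = getalltext(elem)       # 'Now   is\nthe time.'
    return normalizetext(flat)    # 'Now is the time.'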
def getval(meta, field):
"""
get the desired field from the metadata file
"""
xpath = ''
try:
xpath = fields[field][0]
except:
print "failed to look up in fields where field = '%s', position 0" % field
raise
if xpath != '':
val = meta.xpath(xpath)
return val
def getMemoryProfile(buffer):
try:
return ImageCms.core.profile_fromstring(buffer)
except (IOError, TypeError, ValueError), v:
raise ImageCms.PyCMSError(v)
def fileval(root, interstitial, id, tail):
fn = join(root, interstitial, '-'.join((id, tail)))
if isfile(fn):
return fn
else:
raise IOError, "'%s' is not a file" % fn
def getFlickr(key=API_KEY, secret=API_SECRET):
flickr = FlickrAPI(key, secret)
(token, frob) = flickr.get_token_part_one(perms='write')
if not token: raw_input("Press ENTER after you authorized this program")
flickr.get_token_part_two((token, frob))
return flickr
class AI():
""" handle all information, behaviors, and avatars of an AWIB image """
def __init__(self, id, path='.', verify=['pleiades',]):
""" initialize an AWIB image object by reading data from disk and verifying online sources as necessary """
self.modified = False
self.mods = []
self.flickr_verified = False
self.pleiades_verified = False
self.pleiades_json = {}
# the awib ID is the one ring to rule all these images
self.id = id
# determine and verify paths to the files on disk that correspond to this image
if path is not None:
if isdir(path):
self.path = path
try:
self.fn_meta = fileval(path, 'meta', id, 'meta.xml')
except IOError as e:
raise type(e), type(e)(e.message + " while trying to set metadata filename"), sys.exc_info()[2]
try:
self.fn_master = fileval(path, 'masters', id, 'master.tif')
except IOError as e:
raise type(e), type(e)(e.message + " while trying to set master filename"), sys.exc_info()[2]
try:
self.fn_review = fileval(path, 'review-images', id, 'review.jpg')
except IOError:
self.fn_review = None
try:
self.fn_thumb = fileval(path, 'thumbnails', id, 'thumb.jpg')
except IOError:
self.fn_thumb = None
else:
raise IOError, "'%s' is not a directory" % path
# read and parse filesystem metadata
self.loadxml()
# determine and validate information about this image in flickr
if 'flickr' in verify:
self.verify_flickr()
# determine and validate information about this image in the FDA
# determine and validate information about this image in Pleiades
if 'pleiades' in verify:
self.verify_pleiades()
# TBD
def loadxml(self):
""" load AWIB image data from standard metadata XML file """
self.photographers = []
self.keywords = []
self.geography = []
full = ''
family = ''
given = ''
f = open(self.fn_meta)
meta = etree.parse(f)
self.meta = meta
f.close()
w = "//info[@type='isaw']"
for ele in meta.xpath(join(w, "*")):
if ele.tag == 'photographer':
d = {}
if len(ele) != 0:
for sub in ele:
d[sub.tag.replace('-', '_')] = sub.text
elif ele.text is not None:
d['name'] = ele.text
if 'name' not in d.keys() and 'family_name' in d.keys() and 'given_name' in d.keys():
d['name'] = ' '.join((d['given_name'], d['family_name']))
self.logmod('add', join(meta.getpath(ele), 'name'), len(self.photographers), 'generated full name for photographer from given and family names already in the metadata file')
if len(d) > 0:
self.photographers.append(d)
elif ele.tag == 'typology':
if len(ele) != 0:
for sub in ele:
self.keywords.append(sub.text)
elif ele.tag == 'geography':
if len(ele) != 0:
for sub in ele:
d = {}
if len(sub) != 0:
d['type'] = sub.tag
for subsub in sub:
if subsub.tag == 'uri':
m = RX_PLEIADES_URL.match(subsub.text)
if m is not None:
g = m.groups()
uri = join(g[0], g[1])
if subsub.text != uri:
self.logmod('change', meta.getpath(subsub), notes='removed extraneous elements from the Pleiades URI')
d['uri'] = uri
elif subsub.text is not None:
d[subsub.tag.replace('-', '_')] = subsub.text
if len(d) > 0:
self.geography.append(d)
elif ele.tag == 'flickr-url':
flickr_url = None
flickr_id = None
flickr_set = None
m = RX_FLICKR_URL.match(ele.text)
if m is None:
m = RX_FLICRK_URL_SET.match(ele.text)
if m is not None:
g = m.groups()
flickr_url = join(g[0], g[1])
if ele.text != flickr_url:
self.logmod('change', meta.getpath(ele), notes='removed extraneous elements from the flickr URL')
self.flickr_url = flickr_url
flickr_id = g[1]
if len(g) > 2:
flickr_set = g[2]
elif ele.tag in ['prosopography', 'notes', 'chronology']:
# suppress these for now
pass
else:
setattr(self, ele.tag.replace('-', '_'), ele.text)
try:
getattr(self, 'flickr_url')
try:
getattr(self, 'flickr_id')
except AttributeError:
if flickr_id is not None:
setattr(self, 'flickr_id', flickr_id)
self.logmod('add', join(w, 'flickr-id'), notes='created flickr ID as extracted from flickr URL')
try:
getattr(self, 'flickr_set')
except AttributeError:
if flickr_set is not None:
setattr(self, 'flickr_set', flickr_set)
self.logmod('add', join(w, 'flickr-set'), notes='created flickr set id as extracted from flickr URL')
except AttributeError:
pass
def prep_for_flickr(self):
""" prepares image for upload to flickr """
# create a PNG
im_master = Image.open(self.fn_master)
try:
pf = ImageCms.core.profile_fromstring(im_master.info['icc_profile'])
except AttributeError as e:
raise type(e), type(e)(e.message + " no icc profile defined on master image '%s'" % self.fn_master), sys.exc_info()[2]
else:
im_flickr2 = ImageCms.profileToProfile(im_master, pf, PROFILE_SRGB)
path = join(self.path, 'flickr2')
try:
makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
fn = join(path, self.id + '.png')
if exists(fn):
raise IOError("'file exists :'%s'" % fn)
else:
im_flickr2.save(fn, format='PNG')
self.fn_flickr2 = fn
# subprocess.call("exiftool -tagsFromFile %s %s" % (self.fn_master, fn))
# create metadata
self.make_flickr_caption()
def send_to_flickr(self):
if isfile(self.fn_flickr2):
flickr = getFlickr()
title = ' '.join((self.title, 'by', self.photographers[0]['name'], 'et al.' if len(self.photographers)>1 else ''))
resp = flickr.upload(filename=self.fn_flickr2, title=title, description=self.flickr_caption, tags=' '.join(self.keywords), is_public=0)
def logmod(self, mtype, xpath, seq=0, notes=None):
self.modified = True
t = (mtype, xpath, seq, notes)
self.mods.append(t)
def make_flickr_caption(self):
""" prepare an awib-style caption as used on flickr """
caption = ""
title = ''
description = ''
creator = ''
year = ''
copyright = ''
ancient = ''
modern = ''
uri = ''
fda = ''
meta = self.meta
v = getval(meta, 'title')
vt = v[0].text
if vt is not None:
title = "%s" % vt
title = normalizetext(title)
caption += "AWIB-ISAW: %s" % title
v = getval(meta, 'description')
vt = v[0].text
if vt is not None:
description = "%s" % vt
description = normalizetext(description)
caption += "\n%s" % description
v = getval(meta, "creator")
vt = normalizetext(getalltext(v[0]))
if vt is not None:
creator = "%s" % vt
creator = normalizetext(creator)
caption += " by %s" % creator
v = getval(meta, "date")
try:
vt = v[0].text
except IndexError:
print "index error trying to get date"
raise
if vt is not None:
year = vt.split('-')[0]
caption += " (%s)" % year
v = getval(meta, "copyright")
vt = v[0].text
if vt is not None:
copyright = vt
copyright = normalizetext(copyright)
caption += "\ncopyright: "
if len(year) > 0:
caption += "%s " % year
caption += "%s (used with permission)" % copyright
v = getval(meta, 'ancient')
vt = v[0].text
if vt is not None:
ancient = "%s" % vt
v = getval(meta, 'modern')
vt = v[0].text
if vt is not None:
modern = "%s" % vt
v = getval(meta, 'uri')
vt = v[0].text
if vt is not None:
if vt.startswith('http://pleiades.stoa.org'):
uri = vt
else:
uri = "http://atlantides.org/batlas/%s" % vt
if len(ancient) > 0 or len(modern) > 0 or len(uri) > 0:
caption += "\nphotographed place: "
if len(ancient) > 0:
caption += "%s " % ancient
if len(modern) > 0:
caption += "(%s) " % modern
if len(uri) > 0:
caption += "[%s]" % uri
v = getval(meta, 'fda')
vt = v[0].text
if vt is not None :
fda = vt
caption += "\narchival copy: %s" % fda
v = getval(meta, 'authority')
vt = v[0].text
if vt is not None:
authority = vt
authority = normalizetext(authority)
caption += "\nauthority: %s" % authority
caption += "\n\nPublished by the Institute for the Study of the Ancient World as part of the Ancient World Image Bank (AWIB). Further information: [http://www.nyu.edu/isaw/awib.htm]."
self.flickr_caption = caption
return caption
def verify_flickr(self):
try:
getattr(self, 'flickr_id')
except AttributeError:
pass
else:
flickr = getFlickr()
r = flickr.photos_getInfo(photo_id=self.flickr_id)
if r.attrib['stat'] != 'ok':
self.flickr_verified = 'Flickr API reports photo_id=%s not found' % self.flickr_id
raise FlickrError(self.flickr_verified)
else:
self.flickr_verified = True
def verify_pleiades(self):
""" verify Pleiades data associated with this image """
for g in self.geography:
if 'uri' in g.keys():
m = RX_PLEIADES_URL.match(g['uri'])
if m is not None:
try:
j = self.get_pleiades_json(g['uri'])
except urllib2.HTTPError as e:
g['verified'] = e.message
raise
else:
if 'verified' not in g.keys():
self.logmod('add', join(self.meta.getpath(self.meta.xpath("//*[starts-with(uri, '%s')]" % g['uri'])[0]), 'verified'), notes='verified Pleiades URI')
g['verified'] = True
self.pleiades_verified = True
def get_pleiades_json(self, uri):
""" tries to download json associated with the place uri; does runtime caching of results """
pid = uri.replace('http://pleiades.stoa.org/places/', '')
try:
return self.pleiades_json[pid]
except KeyError:
jurl = join(uri, 'json')
try:
results = json.load(urllib2.urlopen(jurl))
except urllib2.HTTPError as e:
raise type(e), type(e)(e.message + " while trying to get '%s'" % jurl), sys.exc_info()[2]
else:
self.pleiades_json[pid] = results
return results
def __str__(self):
""" return a serialized version of this object and its content """
d = vars(self)
return '\n'.join("%s: '%s'" % (k, d[k]) for k in sorted(d.iterkeys()))
|
|
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from pygments.token import Token
from prompt_toolkit.document import Document
from prompt_toolkit.enums import SEARCH_BUFFER
from prompt_toolkit.filters import to_cli_filter, Never
from .utils import token_list_len
__all__ = (
'HighlightSearchProcessor',
'HighlightSelectionProcessor',
'HighlightMatchingBracketProcessor',
'PasswordProcessor',
'BracketsMismatchProcessor',
'BeforeInput',
'AfterInput',
'ConditionalProcessor',
'ShowLeadingWhiteSpaceProcessor',
'ShowTrailingWhiteSpaceProcessor',
)
class Processor(with_metaclass(ABCMeta, object)):
"""
Manipulate the tokenstream for a `BufferControl`.
"""
@abstractmethod
def run(self, cli, document, tokens):
return tokens, lambda i: i
def invalidation_hash(self, cli, document):
return None
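# Sketch of a custom processor (illustrative, not exported): run() must return
# the transformed token list plus a callable mapping source positions to
# positions in the new list. This one upper-cases the text and leaves
# positions unchanged.
class _UpperCaseProcessor(Processor):
    def run(self, cli, document, tokens):
        return [(token, text.upper()) for token, text in tokens], lambda i: i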
class HighlightSearchProcessor(Processor):
"""
Processor that highlights search matches in the document.
:param preview_search: A Filter; when active it indicates that we take
the search text in real time while the user is typing, instead of the
last active search state.
"""
def __init__(self, preview_search=Never()):
self.preview_search = to_cli_filter(preview_search)
def _get_search_text(self, cli):
"""
The text we are searching for.
"""
# When the search buffer has focus, take that text.
if self.preview_search(cli) and cli.is_searching and cli.buffers[SEARCH_BUFFER].text:
return cli.buffers[SEARCH_BUFFER].text
# Otherwise, take the text of the last active search.
else:
return cli.search_state.text
def run(self, cli, document, tokens):
search_text = self._get_search_text(cli)
ignore_case = cli.is_ignoring_case
if search_text and not cli.is_returning:
# For each search match, replace the Token.
for index in document.find_all(search_text, ignore_case=ignore_case):
if index == document.cursor_position:
token = Token.SearchMatch.Current
else:
token = Token.SearchMatch
for x in range(index, index + len(search_text)):
tokens[x] = (token, tokens[x][1])
return tokens, lambda i: i
def invalidation_hash(self, cli, document):
search_text = self._get_search_text(cli)
# When the search state changes, highlighting will be different.
return (
search_text,
cli.is_returning,
# When we search for text, and the cursor position changes. The
# processor has to be applied every time again, because the current
# match is highlighted in another color.
(search_text and document.cursor_position),
)
class HighlightSelectionProcessor(Processor):
"""
Processor that highlights the selection in the document.
"""
def run(self, cli, document, tokens):
# In case of selection, highlight all matches.
selection_range = document.selection_range()
if selection_range:
from_, to = selection_range
for i in range(from_, to):
tokens[i] = (Token.SelectedText, tokens[i][1])
return tokens, lambda i: i
def invalidation_hash(self, cli, document):
# When the selection changes, highlighting will be different.
return (
document.selection_range(),
)
class PasswordProcessor(Processor):
"""
Processor that masks the input. (For passwords.)
"""
def __init__(self, char='*'):
self.char = char
def run(self, cli, document, tokens):
# Returns (new_token_list, cursor_index_to_token_index_f)
return [(token, self.char * len(text)) for token, text in tokens], lambda i: i
class HighlightMatchingBracketProcessor(Processor):
"""
When the cursor is on or right after a bracket, it highlights the matching
bracket.
"""
_closing_braces = '])}>'
def __init__(self, chars='[](){}<>'):
self.chars = chars
def run(self, cli, document, tokens):
def replace_token(pos):
""" Replace token in list of tokens. """
tokens[pos] = (Token.MatchingBracket, tokens[pos][1])
def apply_for_document(document):
""" Find and replace matching tokens. """
if document.current_char in self.chars:
pos = document.matching_bracket_position
if pos:
replace_token(document.cursor_position)
replace_token(document.cursor_position + pos)
return True
# Apply for character below cursor.
applied = apply_for_document(document)
# Otherwise, apply for character before cursor.
d = document
if not applied and d.cursor_position > 0 and d.char_before_cursor in self._closing_braces:
apply_for_document(Document(d.text, d.cursor_position - 1))
return tokens, lambda i: i
def invalidation_hash(self, cli, document):
on_brace = document.current_char in self.chars
after_brace = document.char_before_cursor in self.chars
if on_brace:
return (True, document.cursor_position)
elif after_brace and document.char_before_cursor in self._closing_braces:
return (True, document.cursor_position - 1)
else:
# Don't include the cursor position in the hash if we are not *on*
# a brace. We don't have to rerender the output, because it will be
# the same anyway.
return False
class BracketsMismatchProcessor(Processor):
"""
Processor that replaces the token type of mismatched brackets with an Error token.
"""
error_token = Token.Error
def run(self, cli, document, tokens):
stack = [] # Pointers to the result array
for index, (token, text) in enumerate(tokens):
top = tokens[stack[-1]][1] if stack else ''
if text in '({[]})':
if text in '({[':
# Put open bracket on the stack
stack.append(index)
elif (text == ')' and top == '(' or
text == '}' and top == '{' or
text == ']' and top == '['):
# Match found
stack.pop()
else:
# No match for closing bracket.
tokens[index] = (self.error_token, text)
# Highlight unclosed brackets that are still on the stack.
for index in stack:
tokens[index] = (Token.Error, tokens[index][1])
return tokens, lambda i: i
class BeforeInput(Processor):
"""
Insert tokens before the input.
"""
def __init__(self, get_tokens):
assert callable(get_tokens)
self.get_tokens = get_tokens
def run(self, cli, document, tokens):
tokens_before = self.get_tokens(cli)
shift_position = token_list_len(tokens_before)
return tokens_before + tokens, lambda i: i + shift_position
@classmethod
def static(cls, text, token=Token):
def get_static_tokens(cli):
return [(token, text)]
return cls(get_static_tokens)
def __repr__(self):
return '%s(get_tokens=%r)' % (
self.__class__.__name__, self.get_tokens)
class AfterInput(Processor):
"""
Insert tokens after the input.
"""
def __init__(self, get_tokens):
assert callable(get_tokens)
self.get_tokens = get_tokens
def run(self, cli, document, tokens):
return tokens + self.get_tokens(cli), lambda i: i
@classmethod
def static(cls, text, token=Token):
def get_static_tokens(cli):
return [(token, text)]
return cls(get_static_tokens)
def __repr__(self):
return '%s(get_tokens=%r)' % (
self.__class__.__name__, self.get_tokens)
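# Usage sketch (illustrative): the static() helpers build processors that draw
# fixed text around the editable input, typically passed to a BufferControl's
# input_processors list (BufferControl is not imported here).
_example_prompt_processors = [
    BeforeInput.static('> ', Token.Prompt),
    AfterInput.static(' [press Enter]', Token.Toolbar),
]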
class ShowLeadingWhiteSpaceProcessor(Processor):
"""
Make leading whitespace visible.
"""
def __init__(self, token=Token.LeadingWhiteSpace, char='\xb7'):
self.token = token
self.char = char
def run(self, cli, document, tokens):
# Walk through all the tokens.
t = (self.token, self.char)
is_start_of_line = True
for i in range(len(tokens)):
char = tokens[i][1]
if is_start_of_line and char == ' ':
tokens[i] = t
elif char == '\n':
is_start_of_line = True
else:
is_start_of_line = False
return tokens, lambda i: i
class ShowTrailingWhiteSpaceProcessor(Processor):
"""
Make trailing whitespace visible.
"""
def __init__(self, token=Token.TrailingWhiteSpace, char='\xb7'):
self.token = token
self.char = char
def run(self, cli, document, tokens):
# Walk backwards through all the tokens.
t = (self.token, self.char)
is_end_of_line = True
for i in range(len(tokens) - 1, -1, -1):
char = tokens[i][1]
if is_end_of_line and char == ' ':
tokens[i] = t
elif char == '\n':
is_end_of_line = True
else:
is_end_of_line = False
return tokens, lambda i: i
class ConditionalProcessor(Processor):
"""
Processor that applies another processor, according to a certain condition.
Example:
# Create a function that returns whether or not the processor should
# currently be applied.
def highlight_enabled(cli):
return true_or_false
# Wrap it in a `ConditionalProcessor` for usage in a `BufferControl`.
BufferControl(input_processors=[
ConditionalProcessor(HighlightSearchProcessor(),
Condition(highlight_enabled))])
"""
def __init__(self, processor, filter):
assert isinstance(processor, Processor)
self.processor = processor
self.filter = to_cli_filter(filter)
def run(self, cli, document, tokens):
# Run processor when enabled.
if self.filter(cli):
return self.processor.run(cli, document, tokens)
else:
return tokens, lambda i: i
def invalidation_hash(self, cli, document):
# When enabled, use the hash of the processor. Otherwise, just use
# False.
if self.filter(cli):
return (True, self.processor.invalidation_hash(cli, document))
else:
return False
def __repr__(self):
return '%s(processor=%r, filter=%r)' % (
self.__class__.__name__, self.processor, self.filter)
|
|
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""
======================================================
Microprocess - A class supporting concurrent execution
======================================================
A microprocess is a class supporting parallel execution, provided by
forming a wrapper around a generator. It also provides a place for context
to be stored about the generator.
* A component is based on a microprocess - giving it its thread of execution.
* The Scheduler runs microprocesses that have been 'activated'
*This is an Axon internal. If you are writing components you do not need to
understand this as you will normally not use it directly.*
Developers wishing to use Axon in other ways or understand its implementation
should read on with interest!
Basic Usage
-----------
Making and using a microprocess is easy:
1. Subclass microprocess writing your own main() generator method
2. Create and 'activate' it
3. Run the scheduler so it is executed
Specifically, classes that subclass microprocess, and implement a main()
generator function can be activated, and scheduled by the scheduler/microthread
systems. Essentially a microprocess provides a minimal runtime context for the
scheduling & thread handling system.
In more detail:
1. Subclass a microprocess, overriding the main() generator method to make your
own that yields non-zero/False values::
class Loopy(microprocess):
def __init__(self, num):
self.num = num
super(Loopy, self).__init__()
def main(self):
yield 1
while 1:
print "we loop forever", self.num
yield 1
2. Instantiate and activate a few (note these are two separate steps!)::
mp1=Loopy(1)
mp1.activate()
mp2=Loopy(2)
mp2.activate()
mp3=Loopy(3).activate() # a more convenient shorthand
3. If you haven't already, start the scheduler to cause them to be run. The
call will return when all microprocesses have finished executing (which is
*never* in this example case)::
>>> scheduler.run.runThreads()
we loop forever 1
we loop forever 2
we loop forever 3
we loop forever 1
we loop forever 2
we loop forever 3
we loop forever 1
we loop forever 2
... etc ...
Pause a microprocess whilst it is running by calling the pause() method. Wake it
up again by calling unpause(). Pausing a microprocess means that it will cease
to be executed until something else unpauses it. When unpaused it picks up from
where it left off.
More detail
-----------
Essentially a microprocess provides a context for scheduling generators,
and treating them similar to processes/threads. It provides basic facilities to
support the activation (starting), pausing, unpausing and termination of a
generator.
To start a microprocess running, you must create it and then activate it.
Activation is a separate step to allow you to control exactly when you want
a microprocess to actually start running. Once activated, running the scheduler
will cause your generator to be executed along with all other active
microprocesses.
Every yield statement in your generator hands back control, allowing Axon
to schedule other microprocesses that may be running.
You can yield any value you like except zero or False (which are reserved for
future use).
When a microprocess finishes, the scheduler calls its _closeDownMicroprocess()
method. You can either override this in your subclass, or specify a
closeDownValue when initialising microprocess. The scheduler will act on the
return value if it recognises it - see the Scheduler module for more details.
Alternative ways of defining the generator/thread
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Subclass microprocess and write your generator as a differently
named method, for example foo(), and to then specify the *name* of the
"mainmethod" when you ask the microproces to activate::
class MyMicroprocess(microprocess):
def foo(self):
yield 1
while 1:
print "we loop forever!"
yield 1
mp = MyMicroprocess()
mp.activate(mainmethod="foo")
scheduler.run.runThreads()
Alternatively, you can instantiate a microprocess providing your own generator::
def bar():
yield 1
while 1:
print "we loop forever!"
yield 1
mp = MyMicroprocess(thread=bar())
mp.activate()
scheduler.run.runThreads()
Note that this last approach removes the ability of the microprocess to be
prematurely stopped by calling its stop() method.
Microprocess lifecycle in detail
--------------------------------
In terms of runtime a microprocess can
be viewed to have 2 different life cycles - that which an external user
sees, and that which the microprocess sees.
In terms of runtime life cycle viewed externally, a microprocess is created,
activated, and then has its next method repeatedly called until a StopIteration
exception is raised, at which point the microprocess is deleted. In terms
of a more traditional approach the next call approximates to a timeslice
being allocated to a process/thread.
The value returned by next() should be non-zero (reserved for future use). The
scheduler calling next() may also recognise some specific values - see the
Axon.Scheduler.scheduler class for more information.
The runtime life cycle from the view of the microprocess stems from the
fact that a generator wraps a thread of control, by effectively treating
the program counter like a static variable. The following describes this
runtime from the microprocess's point of view.
First the '__init__' function is called during initialisation at object
creation time. This results in a non-active, non-running microprocess.
Activation has been deliberately separated from creation and initialisation.
At some point in the future, the microprocess's activate method is called,
activating the object. When the object is activated, an internal call
to a '_microprocessGenerator' occurs. This function in fact results in
the return object being a generator, which can then have its next method
called repeatedly. This generator is then stored as an attribute of the
microprocess class.
The following describe the flow of control the generator takes when the
generator is provided with a flow of control/time slice via its next
method. Initially, it creates a local generator object - 'pc' - by calling
the object's main method. (This allows the client of the microprocess class
to provide their own generator if they wish.) This is necessary due to
the fact that any function containing a 'yield' keyword is a generator -
the 'yield' keyword cannot be abstracted away. Next, inside a loop, the
microprocess calls the next() method of its local generator object 'pc' -
effectively providing a time slice to the user of the microprocess class.
Any result provided by the timeslice is then yielded (returned) to the
client of the generator. However if the microprocess has its stopped
flag set, the microprocess generator simply yields a null value, followed
by stopping.
This all boils down to checking to see if the microprocess is not stopped
prior to running the body of a generator formed from the main method of the
class. The intent here is that users will inherit from
the microprocess class, and then reimplement the main method, which
periodically yields control. If the user/inheriting class does not implement
a main method, then the system provides a stub that simply returns.
Pausing and unpausing of microprocesses has been delegated to the scheduler to
allow Axon systems to not consume CPU cycles when idle. When a microprocess is
paused the scheduler simply never calls its next() method until it is unpaused.
As such, calls to pause() and unpause() are actually relayed to the scheduler.
The microprocess class uses a dummy scheduler _NullScheduler until it is
actually activated. This is done so pause() and unpause() calls can be silently
absorbed whilst a microprocess is not yet active.
Essentially the microprocess provides a context for scheduling generators,
and treating them similar to processes/threads.
Clients are not expected to use the microprocess class itself directly -
they are expected to subclass the microprocess class. Subclasses do need
however to call the microprocess constructor. A minimal client class could
look like this::
from Axon.Microprocess import microprocess
class automaton(microprocess):
def __init__(self):
super(automaton, self).__init__() # Call superclass constructor
def main(self):
while 1:
yield 1
print "Hello Again"
This microprocess would then be run by a wrapper as follows::
import Axon.Microprocess as microprocess
import Axon.Scheduler as scheduler
s = scheduler.scheduler()
a = automaton()
a.activate()
s.runThreads()
The component class does this, and adds further facilities for
inter-microprocess communication. Likewise, the scheduler class subclasses
microprocess so that it can be scheduled in parallel with other tasks.
As noted previously, every microprocess object has access to a debugger,
which is accessed via the local attribute self.debugger, which we shall
return to later. Likewise every microprocess object contains a reference
to a scheduler.
Internal flags/state
--------------------
* **id** and **name** - unique identifiers. No other Axon entity will have the
same name or id.
* **init** - a flag indicating if the microprocess has been correctly
initialised.
* **stopped** - Indicates that the microprocess has run and since stopped.
* **__thread** - the generator object that gets executed whenever next() is
called. Is actually an internally created generator that wraps the one
created by the main() method.
* **scheduler** - The scheduler that controls execution of this microprocess.
When not yet activated a dummy scheduler (NullScheduler) is used instead.
* **tracker** - The coordinating assistant tracker to be used by this
microprocess.
* **debugger** - A local debugging object. (See the debug class docs for more
detail)
Note that the paused/awake state of a microprocess is something maintained and
managed by the scheduler; not the microprocess itself.
"""
import time
from Axon.util import removeAll
from Axon.idGen import strId, numId, tupleId
from Axon.debug import debug
import Axon.Base
import Axon.CoordinatingAssistantTracker as cat
from Axon.util import next
class _NullScheduler(object):
"""\
A dummy scheduler, used by microprocess when it has not yet been activated
(and therefore isn't yet assigned to a real scheduler).
Provides dummy versions of the methods a microprocess may wish to call to
get stuff done.
"""
def wakeThread(self,mprocess):
"""Dummy method - does nothing."""
pass
def pauseThread(self,mprocess):
"""Dummy method - does nothing."""
pass
def isThreadPaused(self,mprocess):
"""Dummy method - does nothing."""
return False
_nullscheduler = _NullScheduler()
class microprocess(Axon.Base.AxonObject):
"""\
microprocess([thread][,closeDownValue]) -> new microprocess object
Creates a new microprocess object (not yet activated). You can optionally
specify an alternative generator to be used instead of the one the microprocess
would ordinarily create for itself.
Keyword arguments:
- thread -- None, or an alternative generator to be the thread of execution in this microprocess.
- closeDownValue -- Value to be returned when the microprocess has finished and _closeDownMicroprocess() is called (default=0)
"""
schedulerClass = None
trackerClass = None
def setTrackerClass(cls, newTrackerClass):
"""\
Static method, for setting the default coordinating assistant tracker for
microprocesses.
XXX - Not used atm afaik? (Matt 21/03/2007)
"""
cls.trackerClass = newTrackerClass
setTrackerClass=classmethod(setTrackerClass)
def setSchedulerClass(cls, newSchedulerClass):
"""\
Static method, for setting the default scheduler for microprocesses.
"""
cls.schedulerClass = newSchedulerClass
setSchedulerClass=classmethod(setSchedulerClass)
def __init__(self, thread = None, closeDownValue = 0, tag=""):
"""\
Microprocess initialiser.
Subclasses must call this using the idiom super(TheClass, self).__init__()
"""
self.init = 1
self.id,self.name = tupleId(self)
self.name = self.name + tag
self.__stopped = 0
if thread is not None:
self.__thread = thread
else:
self.__thread = None # Explicit better than implicit
self.closeDownValue = closeDownValue
self.scheduler = _nullscheduler
self.tracker=cat.coordinatingassistanttracker.getcat()
# If the client has defined a debugger in their class we don't want to override it.
# However if they haven't, we provide them with one
if not 'debugger' in list(self.__dict__.keys()):
self.debugger = debug()
self.debugger.useConfig()
if self.debugger.areDebugging("microprocess.__init__", 5):
self.debugger.debugmessage("microprocess.__init__", "Defining debugger for self", self.__class__)
def __str__(self):
"""Standard function for rendering the object as a string."""
result = ""
result = result + self.name + " :"
result = result + self.id.__str__() + " :"
result = result + self.init.__str__() + " :"
result = result + self.__stopped.__str__() + " :"
return result
def __next__(self): # Python 3 compatibility
return self.next()
def next(self):
"""\
Calls next() of the internal generator - lets you drop a microprocess in
somewhere where you'd ordinarily stick a generator.
Internally this calls self.__thread.next() to pass the timeslice down to
the actual generator
"""
# return self.__thread.next()
return next(self.__thread) # Python 3 idiom (via helper in python2)
def _isStopped(self):
"""\
Returns True if this microprocess has been running but has since been
halted or terminated of its own accord. Otherwise returns False.
"""
if self.debugger.areDebugging("microprocess._isStopped", 1):
self.debugger.debugmessage("microprocess._isStopped", "self.stopped",self.__stopped)
return self.__stopped == 1
def _isRunnable(self):
"""
Returns True if the microprocess is active and awake (ie. not paused).
This query is actually passed on to this microprocess's scheduler.
"""
if self.debugger.areDebugging("microprocess._isRunnable", 10):
self.debugger.debugmessage("microprocess._isRunnable", "self.scheduler.isMProcessPaused(self)", self.scheduler.isMProcessPaused(self))
return not self.scheduler.isThreadPaused(self)
def stop(self):
"""\
Halts the microprocess, no way to "unstop"
"""
if self.debugger.areDebugging("microprocess.stop", 1):
self.debugger.debugmessage("microprocess.stop", "Microprocess STOPPED", self.id,self.name,self)
self.__stopped = 1
self.scheduler = _nullscheduler
def pause(self):
"""\
Pauses the microprocess.
If done by the microprocess itself, the microprocess will pause at the
next point it 'yields'.
Internally, the request is forwarded to this microprocesses scheduler.
"""
if self.debugger.areDebugging("microprocess.pause", 1):
self.debugger.debugmessage("microprocess.pause", "Microprocess PAUSED", self.id,self.name,self)
self.scheduler.pauseThread(self)
def unpause(self):
"""\
Un-pauses the microprocess.
This is provided to allow other microprocesses to 'wake up' this one.
This can only be performed by an external microprocess - if you are paused
there is no way you can unpause yourself!
Does nothing if microprocess has been stopped.
Internally, the request is forwarded to this microprocess's scheduler.
"""
if self.debugger.areDebugging("microprocess.unpause", 1):
self.debugger.debugmessage("microprocess.unpause", "Microprocess UNPAUSED", self.id,self.name,self)
self.scheduler.wakeThread(self)
def _unpause(self):
"""DEPRECATED - use M.unpause() instead"""
if self.debugger.areDebugging("microprocess._unpause", 1):
self.debugger.debugmessage("microprocess._unpause", "Microprocess UNPAUSED", self.id,self.name,self)
noisydeprecationwarning = "Use self.unpause() rather than self._unpause(). self._unpause() will be deprecated."
print (noisydeprecationwarning)
return self.unpause()
def main(self):
"""\
'main' thread of execution stub function.
Client classes are expected to override this.
Write your replacement as a generator (a method with 'yield' statements
in it). 'Yield' any non-zero values you like regularly to hand control to
the scheduler so other microprocesses can get a turn at executing. Your
code must therefore not block - eg. waiting on a system call or event.
If you miss this off a class that directly subclasses microprocess, your program
will run, but it will not do what you want!
"""
if self.debugger.areDebugging("microprocess.main", 0):
self.debugger.debugmessage("microprocess.main", self.name,"OI! You're only supposed to blow the bloody doors off!")
self.debugger.debugmessage("microprocess.main", self.name,"You're likely to have called WaitComplete *BUT* with a function call not a generator call")
"If you ever see the above message in your debug output, you've made a big mistake!"
yield 1
return
def _microprocessGenerator(self,someobject, mainmethod="main"):
"""\
This contains the mainloop for a microprocess, returning a
generator object. Creates the thread of control by calling the
class's main method, then in a loop repeatedly calls the resulting
generator's next method providing the object with time slices.
After each time slice, the _microprocessGenerator yields control
back to its caller.
Keyword arguments:
- someobject -- the object containing the main method (usually 'self')
- mainmethod -- *name* of the method that is the generator to be run as the thread.
"""
pc = someobject.__getattribute__(mainmethod)()
while(1):
# Continually try to run the code, and then release control
if someobject._isStopped():
# Microprocess has stopped
yield None
return
else:
# v = pc.next() # python 2
v = next(pc)
yield v # Yield control back - making us into a generator function
def activate(self, Scheduler=None, Tracker=None, mainmethod="main"):
"""\
Call to activate this microprocess, so it can start to be executed by a
scheduler. Usual usage is to simply call x.activate()
You can optionally specify a specific scheduler or tracker to use (instead of the
defaults). You can also specify that a different method is the 'main' generator.
Keyword arguments:
- Scheduler -- None to use the default scheduler; or an alternate scheduler.
- Tracker -- None to use the default coordinating assistant tracker; or an alternative one.
- mainmethod -- Optional. The name of the 'main' method of this microprocess (default="main")
"""
# call the _microprocessGenerator function to create a generator
# object, places this into the thread attribute of the microprocess
# and appends the component to the scheduler's run queue.
if self.debugger.areDebugging("microprocess.activate", 1):
self.debugger.debugmessage("microprocess.activate", "Activating microprocess",self)
if not self.__thread:
self.__thread = self._microprocessGenerator(self,mainmethod)
#
# Whilst a basic microprocess does not "need" a local scheduler,
# classes inheriting from microprocess may well wish to do so.
# (Specifically the component class needs that capability)
#
if Scheduler is not None:
if self.debugger.areDebugging("microprocess.activate", 1):
self.debugger.debugmessage("microprocess.activate", "Activating microprocess",self)
Scheduler._addThread(self)
self.scheduler = Scheduler
else:
self.__class__.schedulerClass.run._addThread(self)
self.scheduler = self.__class__.schedulerClass.run
if Tracker is not None:
self.tracker = Tracker
else:
pass
if self.debugger.areDebugging("microprocess.activate", 5):
self.debugger.debugmessage("microprocess.activate", "Using Scheduler",self.scheduler)
return self
def _closeDownMicroprocess(self):
"""\
Stub method that is overridden internally in Axon but not by clients.
Called by scheduler to ask microprocess to perform any desired shutdown
tasks. The scheduler also processes any IPC objects in the return value.
"""
return self.closeDownValue
def run(self):
"""\
run - starts the scheduler for this microprocess and runs it.
This is a convenient shortcut to activate and run this microprocess and
any other microprocesses that have already been activated (with the same
scheduler).
"""
self.activate()
self.__class__.schedulerClass.run.runThreads()
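# Minimal usage sketch, mirroring the module docstring (the class name is
# illustrative): subclass microprocess, yield non-zero values from main(),
# then activate and run it via the scheduler, e.g.
#     _ExampleTicker().run()
class _ExampleTicker(microprocess):
    def main(self):
        for i in range(3):
            # Each yield hands control back to the scheduler.
            yield 1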
if __name__ == '__main__':
print ("Test code currently disabled")
if 0:
def microProcessThreadTest():
class myProcess(microprocess):
def main(self):
i = 100
yield wouldblock(self)
while(i):
i = i -1
print ("myProcess",self.name, ":", "hello World")
yield notify(self,None, 10, "this")
threadfactory = microthread()
r = scheduler()
for i in range(5):
p = myProcess(i)
t = threadfactory.activate(p)
r._addThread(t)
context = r.runThreads()
microProcessThreadTest()
|
|
"""\
Example.
%(prog)s production.ini
"""
from webtest import TestApp
from snovault import STORAGE
from snovault.elasticsearch import ELASTIC_SEARCH
import atexit
import datetime
import elasticsearch.exceptions
import json
import logging
import os
import psycopg2
import select
import signal
import socket
import sqlalchemy.exc
import sys
import threading
import time
from urllib.parse import parse_qsl
log = logging.getLogger(__name__)
EPILOG = __doc__
DEFAULT_TIMEOUT = 60
PY2 = sys.version_info[0] == 2
# We need this because of MVCC visibility.
# See slide 9 at http://momjian.us/main/writings/pgsql/mvcc.pdf
# https://devcenter.heroku.com/articles/postgresql-concurrency
def run(testapp, timeout=DEFAULT_TIMEOUT, dry_run=False, path='/index', control=None, update_status=None):
assert update_status is not None
timestamp = datetime.datetime.now().isoformat()
update_status(
status='connecting',
timestamp=timestamp,
timeout=timeout,
)
# Make sure elasticsearch is up before trying to index.
if path == '/index_file':
es = testapp.app.registry['snp_search']
else:
es = testapp.app.registry[ELASTIC_SEARCH]
# Wait until cluster comes up
es.cluster.health(wait_for_status='yellow', request_timeout=60)
es.info()
log.info('es_index_listener given path: ' + path)
max_xid = 0
DBSession = testapp.app.registry[STORAGE].write.DBSession
engine = DBSession.bind # DBSession.bind is configured by app init
# noqa http://docs.sqlalchemy.org/en/latest/faq.html#how-do-i-get-at-the-raw-dbapi-connection-when-using-an-engine
connection = engine.pool.unique_connection()
try:
connection.detach()
conn = connection.connection
conn.autocommit = True
conn.set_session(readonly=True)
sockets = [conn]
if control is not None:
sockets.append(control)
recovery = None
listening = False
with conn.cursor() as cursor:
while True:
if not listening:
# cannot execute LISTEN during recovery
cursor.execute("""SELECT pg_is_in_recovery();""")
recovery, = cursor.fetchone()
if not recovery:
# http://initd.org/psycopg/docs/advanced.html#asynchronous-notifications
cursor.execute("""LISTEN "snovault.transaction";""")
log.debug("Listener connected")
listening = True
cursor.execute("""SELECT txid_current_snapshot();""")
snapshot, = cursor.fetchone()
timestamp = datetime.datetime.now().isoformat()
update_status(
listening=listening,
recovery=recovery,
snapshot=snapshot,
status='indexing',
timestamp=timestamp,
max_xid=max_xid,
)
try:
res = testapp.post_json(path, {
'record': True,
'dry_run': dry_run,
'recovery': recovery,
})
except Exception as e:
timestamp = datetime.datetime.now().isoformat()
log.exception('index failed at max xid: %d', max_xid)
update_status(error={
'error': repr(e),
'max_xid': max_xid,
'timestamp': timestamp,
})
else:
timestamp = datetime.datetime.now().isoformat()
result = res.json
result['stats'] = {
k: int(v) for k, v in parse_qsl(
res.headers.get('X-Stats', ''))
}
result['timestamp'] = timestamp
update_status(last_result=result)
if result.get('indexed', 0):
update_status(result=result)
log.info(result)
update_status(
status='waiting',
timestamp=timestamp,
max_xid=max_xid,
)
# Wait for notification
readable, writable, err = select.select(sockets, [], sockets, timeout)
if err:
raise Exception('Socket error')
if control in readable:
command = control.recv(1)
log.debug('received command: %r', command)
if not command:
# Other end shutdown
return
if conn in readable:
conn.poll()
while conn.notifies:
notify = conn.notifies.pop()
xid = int(notify.payload)
max_xid = max(max_xid, xid)
log.debug('NOTIFY %s, %s', notify.channel, notify.payload)
finally:
connection.close()
class ErrorHandlingThread(threading.Thread):
if PY2:
@property
def _kwargs(self):
return self._Thread__kwargs
@property
def _args(self):
return self._Thread__args
@property
def _target(self):
return self._Thread__target
def run(self):
timeout = self._kwargs.get('timeout', DEFAULT_TIMEOUT)
update_status = self._kwargs['update_status']
control = self._kwargs['control']
while True:
try:
self._target(*self._args, **self._kwargs)
except (psycopg2.OperationalError, sqlalchemy.exc.OperationalError, elasticsearch.exceptions.ConnectionError) as e:
# Handle database restart
log.warning('Database not there, maybe starting up: %r', e)
timestamp = datetime.datetime.now().isoformat()
update_status(
timestamp=timestamp,
status='sleeping',
error={'error': repr(e), 'timestamp': timestamp},
)
readable, _, _ = select.select([control], [], [], timeout)
if control in readable:
command = control.recv(1)
log.debug('received command: %r', command)
if not command:
# Other end shutdown
return
log.debug('sleeping')
time.sleep(timeout)
continue
except Exception:
# Unfortunately mod_wsgi does not restart immediately
log.exception('Exception in listener, restarting process at next request.')
os.kill(os.getpid(), signal.SIGINT)
break
def composite(loader, global_conf, **settings):
listener = None
# Register before testapp creation.
@atexit.register
def join_listener():
if listener:
log.debug('joining listening thread')
listener.join()
path = settings.get('path', '/index')
# Composite app is used so we can load the main app
app_name = settings.get('app', None)
app = loader.get_app(app_name, global_conf=global_conf)
username = settings.get('username', 'IMPORT')
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': username,
}
testapp = TestApp(app, environ)
# Use sockets to integrate with select
controller, control = socket.socketpair()
timestamp = datetime.datetime.now().isoformat()
status_holder = {
'status': {
'status': 'starting listener',
'started': timestamp,
'errors': [],
'results': [],
},
}
def update_status(error=None, result=None, indexed=None, **kw):
# Setting a value in a dictionary is atomic
status = status_holder['status'].copy()
status.update(**kw)
if error is not None:
status['errors'] = [error] + status['errors'][:2]
if result is not None:
status['results'] = [result] + status['results'][:9]
status_holder['status'] = status
kwargs = {
'testapp': testapp,
'control': control,
'update_status': update_status,
'path': path,
}
if 'timeout' in settings:
kwargs['timeout'] = float(settings['timeout'])
listener = ErrorHandlingThread(target=run, name='listener', kwargs=kwargs)
listener.daemon = True
log.debug('starting listener')
listener.start()
# Register before testapp creation.
@atexit.register
def shutdown_listener():
log.debug('shutting down listening thread')
control # Prevent early gc
controller.shutdown(socket.SHUT_RDWR)
def status_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'application/json; charset=utf-8')]
start_response(status, response_headers)
body = json.dumps(status_holder['status'])
return [body.encode("utf-8")]
return status_app
def internal_app(configfile, app_name=None, username=None):
from webtest import TestApp
from pyramid import paster
app = paster.get_app(configfile, app_name)
if not username:
username = 'IMPORT'
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': username,
}
return TestApp(app, environ)
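# Usage sketch (illustrative; 'production.ini' is a placeholder): build a
# TestApp against the configured Pyramid app and start the indexing loop in
# dry-run mode. run() blocks while waiting on postgres notifications, so this
# is only meant to be called from a dedicated process or thread.
def _example_dry_run(configfile='production.ini'):
    testapp = internal_app(configfile, username='INDEXER')
    return run(testapp, timeout=DEFAULT_TIMEOUT, dry_run=True, path='/index',
               update_status=lambda **kw: None)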
def main():
import argparse
parser = argparse.ArgumentParser(
description="Listen for changes from postgres and index in elasticsearch",
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument(
'--username', '-u', default='INDEXER', help="Import username")
parser.add_argument(
'--dry-run', action='store_true', help="Don't post to ES, just print")
parser.add_argument(
'-v', '--verbose', action='store_true', help="Print debug level logging")
parser.add_argument(
'--poll-interval', type=int, default=DEFAULT_TIMEOUT,
help="Poll interval between notifications")
parser.add_argument(
'--path', default='/index',
help="Path of indexing view (/index or /index_file)")
parser.add_argument('config_uri', help="path to configfile")
args = parser.parse_args()
logging.basicConfig()
testapp = internal_app(args.config_uri, args.app_name, args.username)
# Loading app will have configured from config file. Reconfigure here:
if args.verbose or args.dry_run:
logging.getLogger('snovault').setLevel(logging.DEBUG)
return run(testapp, args.poll_interval, args.dry_run, args.path)
if __name__ == '__main__':
main()
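# Example invocation (the script name, config path and option values are illustrative):
#   python es_index_listener.py --app-name app --poll-interval 60 production.ini
# With --dry-run nothing is posted to the indexing view; --verbose (or --dry-run)
# switches the 'snovault' logger to DEBUG level.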
|
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.middleware.csrf import get_token
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.contrib import messages
from django.views.generic import View
from deploy_board.settings import IS_PINTEREST
if IS_PINTEREST:
from deploy_board.settings import DEFAULT_PROVIDER, DEFAULT_CMP_IMAGE, \
DEFAULT_CMP_HOST_TYPE, DEFAULT_CMP_PINFO_ENVIRON, DEFAULT_CMP_ACCESS_ROLE, DEFAULT_CELL, \
DEFAULT_PLACEMENT, USER_DATA_CONFIG_SETTINGS_WIKI
import json
import logging
from helpers import baseimages_helper, hosttypes_helper, securityzones_helper, placements_helper, \
autoscaling_groups_helper, groups_helper, cells_helper
from helpers import clusters_helper, environs_helper, environ_hosts_helper
from helpers.exceptions import NotAuthorizedException, TeletraanException
import common
import traceback
log = logging.getLogger(__name__)
DEFAULT_PAGE_SIZE = 200
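# The views below back the cluster-management pages: basic/advanced capacity
# creation, cluster configuration, base images, host types, security zones,
# placements, and cluster replacement progress/history.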
class EnvCapacityBasicCreateView(View):
def get(self, request, name, stage):
host_types = hosttypes_helper.get_by_provider(
request, DEFAULT_PROVIDER)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
security_zones = securityzones_helper.get_by_provider_and_cell_name(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
placements = placements_helper.get_by_provider_and_cell_name(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
default_base_image = get_base_image_info_by_name(request, DEFAULT_CMP_IMAGE, DEFAULT_CELL)
env = environs_helper.get_env_by_stage(request, name, stage)
capacity_creation_info = {
'environment': env,
'hostTypes': host_types,
'securityZones': security_zones,
'placements': placements,
'baseImages': default_base_image,
'defaultCMPConfigs': get_default_cmp_configs(name, stage),
'defaultProvider': DEFAULT_PROVIDER,
'defaultHostType': DEFAULT_CMP_HOST_TYPE,
'defaultSeurityZone': DEFAULT_PLACEMENT
}
# cluster manager
return render(request, 'configs/new_capacity.html', {
'env': env,
'capacity_creation_info': json.dumps(capacity_creation_info)})
def post(self, request, name, stage):
log.info("Post to capacity with data {0}".format(request.body))
try:
cluster_name = '{}-{}'.format(name, stage)
cluster_info = json.loads(request.body)
log.info("Create Capacity in the provider")
if 'configs' in cluster_info:
if 'spiffe_id' in cluster_info['configs']:
log.error("Teletraan does not support user to change spiffe_id %s" % cluster_info['spiffe_id'])
raise TeletraanException("Teletraan does not support user to create spiffe_id")
clusters_helper.create_cluster_with_env(request, cluster_name, name, stage, cluster_info)
log.info("Associate cluster_name to environment")
# Update cluster info
environs_helper.update_env_basic_config(
request, name, stage, data={"clusterName": cluster_name})
log.info("Update capacity to the environment")
# set up env and group relationship
environs_helper.add_env_capacity(
request, name, stage, capacity_type="GROUP", data=cluster_name)
return HttpResponse("{}", content_type="application/json")
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Have an error {}".format(e))
return HttpResponse(e, status=500, content_type="application/json")
class EnvCapacityAdvCreateView(View):
def get(self, request, name, stage):
host_types = hosttypes_helper.get_by_provider(
request, DEFAULT_PROVIDER)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
security_zones = securityzones_helper.get_by_provider_and_cell_name(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
placements = placements_helper.get_by_provider_and_cell_name(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
cells = cells_helper.get_by_provider(request, DEFAULT_PROVIDER)
base_images = get_base_image_info_by_name(request, DEFAULT_CMP_IMAGE, DEFAULT_CELL)
base_images_names = baseimages_helper.get_image_names(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
env = environs_helper.get_env_by_stage(request, name, stage)
provider_list = baseimages_helper.get_all_providers(request)
capacity_creation_info = {
'environment': env,
'hostTypes': host_types,
'securityZones': security_zones,
'placements': placements,
'cells': cells,
'baseImages': base_images,
'baseImageNames': base_images_names,
'defaultBaseImage': DEFAULT_CMP_IMAGE,
'defaultCMPConfigs': get_default_cmp_configs(name, stage),
'defaultProvider': DEFAULT_PROVIDER,
'defaultCell': DEFAULT_CELL,
'defaultHostType': DEFAULT_CMP_HOST_TYPE,
'defaultSeurityZone': DEFAULT_PLACEMENT,
'providerList': provider_list,
'configList': get_aws_config_name_list_by_image(DEFAULT_CMP_IMAGE)
}
# cluster manager
return render(request, 'configs/new_capacity_adv.html', {
'env': env,
'capacity_creation_info': json.dumps(capacity_creation_info),
'user_data_config_settings_wiki': USER_DATA_CONFIG_SETTINGS_WIKI,
'is_pinterest': IS_PINTEREST})
def post(self, request, name, stage):
log.info("Post to capacity with data {0}".format(request.body))
try:
cluster_name = '{}-{}'.format(name, stage)
cluster_info = json.loads(request.body)
log.info("Create Capacity in the provider")
clusters_helper.create_cluster(request, cluster_name, cluster_info)
log.info("Update cluster_name to environment")
# Update environment
environs_helper.update_env_basic_config(request, name, stage,
data={"clusterName": cluster_name, "IsDocker": True})
log.info("Update capacity to the environment")
# set up env and group relationship
environs_helper.add_env_capacity(
request, name, stage, capacity_type="GROUP", data=cluster_name)
return HttpResponse("{}", content_type="application/json")
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Have an error {}", e)
return HttpResponse(e, status=500, content_type="application/json")
class ClusterConfigurationView(View):
def get(self, request, name, stage):
cluster_name = '{}-{}'.format(name, stage)
current_cluster = clusters_helper.get_cluster(request, cluster_name)
host_types = hosttypes_helper.get_by_provider(
request, DEFAULT_PROVIDER)
current_image = baseimages_helper.get_by_id(
request, current_cluster['baseImageId'])
current_cluster['baseImageName'] = current_image['abstract_name']
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
cells = cells_helper.get_by_provider(request, current_cluster['provider'])
security_zones = securityzones_helper.get_by_provider_and_cell_name(
request, current_cluster['provider'], current_cluster['cellName'])
placements = placements_helper.get_by_provider_and_cell_name(
request, current_cluster['provider'], current_cluster['cellName'])
base_images = get_base_image_info_by_name(
request, current_image['abstract_name'], current_cluster['cellName'])
base_images_names = baseimages_helper.get_image_names(
request, current_cluster['provider'], current_cluster['cellName'])
env = environs_helper.get_env_by_stage(request, name, stage)
provider_list = baseimages_helper.get_all_providers(request)
capacity_creation_info = {
'environment': env,
'cells': cells,
'hostTypes': host_types,
'securityZones': security_zones,
'placements': placements,
'baseImages': base_images,
'baseImageNames': base_images_names,
'defaultBaseImage': DEFAULT_CMP_IMAGE,
'defaultCMPConfigs': get_default_cmp_configs(name, stage),
'defaultProvider': DEFAULT_PROVIDER,
'providerList': provider_list,
'configList': get_aws_config_name_list_by_image(DEFAULT_CMP_IMAGE),
'currentCluster': current_cluster
}
return render(request, 'clusters/cluster_configuration.html', {
'env': env,
'capacity_creation_info': json.dumps(capacity_creation_info),
'user_data_config_settings_wiki': USER_DATA_CONFIG_SETTINGS_WIKI,
'is_pinterest': IS_PINTEREST})
def post(self, request, name, stage):
try:
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = env.get('clusterName')
cluster_info = json.loads(request.body)
log.info("Update Cluster Configuration with {}", cluster_info)
cluster_name = '{}-{}'.format(name, stage)
current_cluster = clusters_helper.get_cluster(request, cluster_name)
log.info("getting current Cluster Configuration is {}", current_cluster)
if 'configs' in current_cluster and 'configs' in cluster_info:
if 'spiffe_id' in current_cluster['configs'] and 'spiffe_id' in cluster_info['configs']:
if current_cluster['configs']['spiffe_id'] != cluster_info['configs']['spiffe_id']:
log.error("Teletraan does not support user to update spiffe_id %s" % cluster_info['spiffe_id'])
raise TeletraanException("Teletraan does not support user to update spiffe_id")
if 'spiffe_id' in current_cluster['configs'] and 'spiffe_id' not in cluster_info['configs']:
log.error("Teletraan does not support user to remove spiffe_id %s" % cluster_info['spiffe_id'])
raise TeletraanException("Teletraan does not support user to remove spiffe_id")
image = baseimages_helper.get_by_id(request, cluster_info['baseImageId'])
clusters_helper.update_cluster(request, cluster_name, cluster_info)
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Post to cluster configuration view has an error {}", e)
return HttpResponse(e, status=500, content_type="application/json")
return HttpResponse(json.dumps(cluster_info), content_type="application/json")
class ClusterCapacityUpdateView(View):
def post(self, request, name, stage):
log.info("Update Cluster Capacity with data {}".format(request.body))
try:
settings = json.loads(request.body)
cluster_name = '{}-{}'.format(name, stage)
log.info("Update cluster {0} with {1}".format(
cluster_name, settings))
minSize = int(settings['minsize'])
maxSize = int(settings['maxsize'])
clusters_helper.update_cluster_capacity(
request, cluster_name, minSize, maxSize)
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Post to cluster capacity view has an error {}", e)
return HttpResponse(e, status=500, content_type="application/json")
return HttpResponse(json.dumps(settings), content_type="application/json")
def create_base_image(request):
params = request.POST
base_image_info = {}
base_image_info['abstract_name'] = params['abstractName']
base_image_info['provider_name'] = params['providerName']
base_image_info['provider'] = params['provider']
base_image_info['description'] = params['description']
base_image_info['cell_name'] = params['cellName']
if 'basic' in params:
base_image_info['basic'] = True
else:
base_image_info['basic'] = False
baseimages_helper.create_base_image(request, base_image_info)
return redirect('/clouds/baseimages')
def get_base_images(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
base_images = baseimages_helper.get_all(request, index, size)
provider_list = baseimages_helper.get_all_providers(request)
cells_list = cells_helper.get_by_provider(request, DEFAULT_PROVIDER)
return render(request, 'clusters/base_images.html', {
'base_images': base_images,
'provider_list': provider_list,
'cells_list': cells_list,
'pageIndex': index,
'pageSize': DEFAULT_PAGE_SIZE,
'disablePrevious': index <= 1,
'disableNext': len(base_images) < DEFAULT_PAGE_SIZE,
})
def get_image_names_by_provider_and_cell(request, provider, cell):
image_names = baseimages_helper.get_image_names(request, provider, cell)
return HttpResponse(json.dumps(image_names), content_type="application/json")
def get_images_by_provider_and_cell(request, provider, cell):
images = baseimages_helper.get_all_by(request, provider, cell)
return HttpResponse(json.dumps(images), content_type="application/json")
def get_placements_by_provider_and_cell(request, provider, cell):
data = placements_helper.get_by_provider_and_cell_name(request, provider, cell)
return HttpResponse(json.dumps(data), content_type="application/json")
def get_security_zones_by_provider_and_cell(request, provider, cell):
data = securityzones_helper.get_by_provider_and_cell_name(request, provider, cell)
return HttpResponse(json.dumps(data), content_type="application/json")
def get_image_names(request):
params = request.GET
provider = params['provider']
env_name = params['env']
stage_name = params['stage']
cell = params.get('cell', DEFAULT_CELL)
image_names = baseimages_helper.get_image_names(request, provider, cell)
curr_image_name = None
curr_base_image = None
if 'curr_base_image' in params:
curr_base_image = params['curr_base_image']
image = baseimages_helper.get_by_id(request, curr_base_image)
curr_image_name = image.get('abstract_name')
contents = render_to_string("clusters/get_image_name.tmpl", {
'image_names': image_names,
'curr_image_name': curr_image_name,
'curr_base_image': curr_base_image,
'provider': provider,
'env_name': env_name,
'stage_name': stage_name,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_base_images_by_name(request):
params = request.GET
cell = params.get('cell', DEFAULT_CELL)
base_images = None
if 'name' in params:
name = params['name']
base_images = baseimages_helper.get_by_name(request, name, cell)
curr_base_image = None
if 'curr_base_image' in params:
curr_base_image = params['curr_base_image']
image = baseimages_helper.get_by_id(request, curr_base_image)
curr_image_name = image.get('abstract_name')
base_images = baseimages_helper.get_by_name(request, curr_image_name, cell)
contents = render_to_string("clusters/get_base_image.tmpl", {
'base_images': base_images,
'curr_base_image': curr_base_image,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_base_image_info_by_name(request, name, cell):
if name.startswith('cmp_base'):
base_images = baseimages_helper.get_acceptance_by_name(request, name, cell)
with_acceptance_rs = []
if base_images:
for image in base_images:
r = image.get('baseImage')
if r:
r['acceptance'] = image.get('acceptance', 'UNKNOWN')
with_acceptance_rs.append(r)
return with_acceptance_rs
return baseimages_helper.get_by_name(request, name, cell)
def get_base_images_by_name_json(request, name):
cell = DEFAULT_CELL
params = request.GET
if params:
cell = params.get('cell', DEFAULT_CELL)
base_images = get_base_image_info_by_name(request, name, cell)
return HttpResponse(json.dumps(base_images), content_type="application/json")
def create_host_type(request):
params = request.POST
host_type_info = {}
host_type_info['abstract_name'] = params['abstractName']
host_type_info['provider_name'] = params['providerName']
host_type_info['provider'] = params['provider']
host_type_info['description'] = params['description']
host_type_info['mem'] = float(params['mem']) * 1024
host_type_info['core'] = int(params['core'])
host_type_info['storage'] = params['storage']
if 'basic' in params:
host_type_info['basic'] = True
else:
host_type_info['basic'] = False
hosttypes_helper.create_host_type(request, host_type_info)
return redirect('/clouds/hosttypes')
def get_host_types(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
host_types = hosttypes_helper.get_all(request, index, size)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
provider_list = baseimages_helper.get_all_providers(request)
return render(request, 'clusters/host_types.html', {
'host_types': host_types,
'provider_list': provider_list,
'pageIndex': index,
'pageSize': DEFAULT_PAGE_SIZE,
'disablePrevious': index <= 1,
'disableNext': len(host_types) < DEFAULT_PAGE_SIZE,
})
def get_host_types_by_provider(request):
params = request.GET
provider = params['provider']
curr_host_type = None
if 'curr_host_type' in params:
curr_host_type = params['curr_host_type']
host_types = hosttypes_helper.get_by_provider(request, provider)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
contents = render_to_string("clusters/get_host_type.tmpl", {
'host_types': host_types,
'curr_host_type': curr_host_type,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_host_type_info(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
host_types = hosttypes_helper.get_all(request, index, size)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
return HttpResponse(json.dumps(host_types), content_type="application/json")
def create_security_zone(request):
params = request.POST
security_zone_info = {}
security_zone_info['abstract_name'] = params['abstractName']
security_zone_info['provider_name'] = params['providerName']
security_zone_info['provider'] = params['provider']
security_zone_info['description'] = params['description']
security_zone_info['cell_name'] = params.get('cellName', DEFAULT_CELL)
if 'basic' in params:
security_zone_info['basic'] = True
else:
security_zone_info['basic'] = False
securityzones_helper.create_security_zone(request, security_zone_info)
return redirect('/clouds/securityzones')
def get_security_zones(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
security_zones = securityzones_helper.get_all(request, index, size)
provider_list = baseimages_helper.get_all_providers(request)
cells_list = cells_helper.get_by_provider(request, DEFAULT_PROVIDER)
return render(request, 'clusters/security_zones.html', {
'security_zones': security_zones,
'provider_list': provider_list,
'cells_list': cells_list,
'pageIndex': index,
'pageSize': DEFAULT_PAGE_SIZE,
'disablePrevious': index <= 1,
'disableNext': len(security_zones) < DEFAULT_PAGE_SIZE,
})
def get_security_zones_by_provider(request):
params = request.GET
provider = params['provider']
curr_security_zone = None
if 'curr_security_zone' in params:
curr_security_zone = params['curr_security_zone']
cell = params.get('cell', DEFAULT_CELL)
security_zones = securityzones_helper.get_by_provider_and_cell_name(request, provider, cell)
contents = render_to_string("clusters/get_security_zone.tmpl", {
'security_zones': security_zones,
'curr_security_zone': curr_security_zone,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_security_zone_info(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
security_zones = securityzones_helper.get_all(request, index, size)
return HttpResponse(json.dumps(security_zones), content_type="application/json")
def create_placement(request):
params = request.POST
placement_info = {}
placement_info['abstract_name'] = params['abstractName']
placement_info['provider_name'] = params['providerName']
placement_info['provider'] = params['provider']
placement_info['description'] = params['description']
placement_info['cell_name'] = params.get('cellName', DEFAULT_CELL)
if 'basic' in params:
placement_info['basic'] = True
else:
placement_info['basic'] = False
placements_helper.create_placement(request, placement_info)
return redirect('/clouds/placements')
def get_placements(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
placements = placements_helper.get_all(request, index, size)
provider_list = baseimages_helper.get_all_providers(request)
cells_list = cells_helper.get_by_provider(request, DEFAULT_PROVIDER)
return render(request, 'clusters/placements.html', {
'placements': placements,
'provider_list': provider_list,
'cells_list': cells_list,
'pageIndex': index,
'pageSize': DEFAULT_PAGE_SIZE,
'disablePrevious': index <= 1,
'disableNext': len(placements) < DEFAULT_PAGE_SIZE,
})
def get_placements_by_provider(request):
params = request.GET
provider = params['provider']
cell = params.get('cell', DEFAULT_CELL)
curr_placement_arrays = None
if 'curr_placement' in params:
curr_placement = params['curr_placement']
curr_placement_arrays = curr_placement.split(',')
placements = placements_helper.get_by_provider_and_cell_name(request, provider, cell)
contents = render_to_string("clusters/get_placement.tmpl", {
'placements': placements,
'curr_placement_arrays': curr_placement_arrays,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_placement_infos(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
placements = placements_helper.get_all(request, index, size)
return HttpResponse(json.dumps(placements), content_type="application/json")
def parse_configs(query_dict):
configs = {}
for key, value in query_dict.iteritems():
if not value:
continue
if key.startswith('TELETRAAN_'):
name = key[len('TELETRAAN_'):]
configs[name] = value
return configs
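# For example, POST fields TELETRAAN_iam_role=base and TELETRAAN_cmp_group=CMP,foo-prod
# (hypothetical values) yield {'iam_role': 'base', 'cmp_group': 'CMP,foo-prod'};
# keys without the TELETRAAN_ prefix and empty values are skipped.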
def get_default_cmp_configs(name, stage):
config_map = {}
config_map['iam_role'] = 'base'
config_map['cmp_group'] = 'CMP,{}-{}'.format(name, stage)
config_map['pinfo_environment'] = DEFAULT_CMP_PINFO_ENVIRON
config_map['pinfo_team'] = 'cloudeng'
config_map['pinfo_role'] = 'cmp_base'
config_map['access_role'] = DEFAULT_CMP_ACCESS_ROLE
return config_map
def parse_cluster_info(request, env_name, env_stage, cluster_name):
params = request.POST
cluster_info = {}
cluster_info['capacity'] = params['capacity']
cluster_info['baseImageId'] = params['baseImageId']
cluster_info['provider'] = params['provider']
cluster_info['hostType'] = params['hostTypeId']
cluster_info['securityZone'] = params['securityZoneId']
cluster_info['placement'] = ",".join(params.getlist('placementId'))
# Update cluster name and isDocker in env
env_info = {}
env_info['clusterName'] = cluster_name
if 'isDocker' in params:
env_info['isDocker'] = True
else:
env_info['isDocker'] = False
environs_helper.update_env_basic_config(
request, env_name, env_stage, data=env_info)
return cluster_info
def delete_cluster(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
log.info("Delete cluster {}".format(cluster_name))
clusters_helper.delete_cluster(request, cluster_name)
# Remove group and env relationship
environs_helper.remove_env_capacity(
request, name, stage, capacity_type="GROUP", data=cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def clone_cluster(request, src_name, src_stage):
try:
params = request.POST
dest_name = params.get('new_environment', src_name)
dest_stage = params.get('new_stage', src_stage + '_clone')
src_cluster_name = '{}-{}'.format(src_name, src_stage)
dest_cluster_name = '{}-{}'.format(dest_name, dest_stage)
##0. teletraan service get src env buildName
src_env = environs_helper.get_env_by_stage(request, src_name, src_stage)
build_name = src_env.get('buildName', None)
external_id = src_env.get('externalId', None)
##1. teletraan service create a new env
dest_env = environs_helper.create_env(request, {
'envName': dest_name,
'stageName': dest_stage,
'buildName': build_name,
'externalId': external_id
})
log.info('clone_cluster, created a new env %s' % dest_env)
##2. rodimus service get src_cluster config
src_cluster_info = clusters_helper.get_cluster(request, src_cluster_name)
log.info('clone_cluster, src cluster info %s' % src_cluster_info)
configs = src_cluster_info.get('configs')
if configs:
cmp_group = configs.get('cmp_group')
if cmp_group:
cmp_groups_set = set(cmp_group.split(','))
cmp_groups_set.remove(src_cluster_name)
cmp_groups_set.remove('CMP')
cmp_groups_set.add(dest_cluster_name)
# CMP needs to be the first in the list
configs['cmp_group'] = ','.join(['CMP'] + list(cmp_groups_set))
src_cluster_info['configs'] = configs
##3. rodimus service post create cluster
src_cluster_info['clusterName'] = dest_cluster_name
src_cluster_info['capacity'] = 0
log.info('clone_cluster, request clone cluster info %s' % src_cluster_info)
dest_cluster_info = clusters_helper.create_cluster_with_env(request, dest_cluster_name, dest_name, dest_stage, src_cluster_info)
log.info('clone_cluster, cloned cluster info %s' % dest_cluster_info)
##4. teletraan service update_env_basic_config
environs_helper.update_env_basic_config(request, dest_name, dest_stage,
data={"clusterName": dest_cluster_name}
)
##5. teletraan service set up env and group relationship
environs_helper.update_env_capacity(request, dest_name, dest_stage, capacity_type="GROUP",
data=[dest_cluster_name])
##6. get src script_config
src_script_configs = environs_helper.get_env_script_config(request, src_name, src_stage)
src_agent_configs = environs_helper.get_env_agent_config(request, src_name, src_stage)
src_alarms_configs = environs_helper.get_env_alarms_config(request, src_name, src_stage)
src_metrics_configs = environs_helper.get_env_metrics_config(request, src_name, src_stage)
src_webhooks_configs = environs_helper.get_env_hooks_config(request, src_name, src_stage)
##7. clone all the extra configs
if src_agent_configs:
environs_helper.update_env_agent_config(request, dest_name, dest_stage, src_agent_configs)
if src_script_configs:
environs_helper.update_env_script_config(request, dest_name, dest_stage, src_script_configs)
if src_alarms_configs:
environs_helper.update_env_alarms_config(request, dest_name, dest_stage, src_alarms_configs)
if src_metrics_configs:
environs_helper.update_env_metrics_config(request, dest_name, dest_stage, src_metrics_configs)
if src_webhooks_configs:
environs_helper.update_env_hooks_config(request, dest_name, dest_stage, src_webhooks_configs)
return HttpResponse(json.dumps(src_cluster_info), content_type="application/json")
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Failed to clone cluster env_name: %s, stage_name: %s" % (src_name, src_stage))
log.error(traceback.format_exc())
return HttpResponse(e, status=500, content_type="application/json")
def get_aws_config_name_list_by_image(image_name):
config_map = {}
config_map['iam_role'] = 'base'
config_map['assign_public_ip'] = 'true'
if IS_PINTEREST:
config_map['pinfo_environment'] = 'prod'
config_map['raid'] = 'true'
config_map['raid_mount'] = '/mnt'
config_map['raid_device'] = '/dev/md0'
config_map['raid_fs'] = 'xfs'
config_map['ebs'] = 'true'
config_map['ebs_size'] = 500
config_map['ebs_mount'] = '/backup'
config_map['ebs_volume_type'] = 'gp2'
config_map['root_volume_size'] = 100
if image_name == DEFAULT_CMP_IMAGE:
config_map['pinfo_role'] = 'cmp_base'
config_map['pinfo_team'] = 'cloudeng'
else:
config_map['pinfo_role'] = ''
config_map['pinfo_team'] = ''
return config_map
def launch_hosts(request, name, stage):
params = request.POST
num = int(params['num'])
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.launch_hosts(request, cluster_name, num)
return redirect('/env/{}/{}/'.format(name, stage))
def terminate_hosts(request, name, stage):
get_params = request.GET
post_params = request.POST
host_ids = None
if 'host_id' in get_params:
host_ids = [get_params.get('host_id')]
if 'hostIds' in post_params:
hosts_str = post_params['hostIds']
host_ids = [x.strip() for x in hosts_str.split(',')]
environ_hosts_helper.stop_service_on_host(request, name, stage, host_ids)
return redirect('/env/{}/{}'.format(name, stage))
def force_terminate_hosts(request, name, stage):
get_params = request.GET
post_params = request.POST
host_ids = None
if 'host_id' in get_params:
host_ids = [get_params.get('host_id')]
if 'hostIds' in post_params:
hosts_str = post_params['hostIds']
host_ids = [x.strip() for x in hosts_str.split(',')]
if 'replaceHost' in post_params:
replace_host = True
else:
replace_host = False
cluster_name = common.get_cluster_name(request, name, stage)
if not cluster_name:
groups = environs_helper.get_env_capacity(
request, name, stage, capacity_type="GROUP")
for group_name in groups:
cluster_name = group_name
clusters_helper.force_terminate_hosts(
request, cluster_name, host_ids, replace_host)
return redirect('/env/{}/{}'.format(name, stage))
def enable_cluster_replacement(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.enable_cluster_replacement(request, cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def pause_cluster_replacement(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.pause_cluster_replacement(request, cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def resume_cluster_replacement(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.resume_cluster_replacement(request, cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def cancel_cluster_replacement(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.cancel_cluster_replacement(request, cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def get_replacement_summary(request, cluster_name, event, current_capacity):
host_ids = event.get('host_ids')
state = event.get('state')
status = event.get('status')
progress_type = 'success' if status in [
'SUCCEEDING', 'SUCCEEDED'] else 'danger'
if not host_ids:
num_finished_host_ids = 0
else:
num_finished_host_ids = len(host_ids.split(','))
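# A COMPLETED event is summarized as either full success or partial failure;
# anything else is treated as in flight and progress is measured against the
# cluster's current capacity using the hosts already replaced successfully.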
if state == 'COMPLETED':
if status == 'SUCCEEDED':
# successful
succeeded = num_finished_host_ids
progress_rate = 100
msg = event.get('error_message', '')
return {
'id': event.get('id'),
'state': state,
'status': status,
'startDate': event.get('start_time'),
'lastUpdateDate': event.get('last_worked_on'),
'progressType': progress_type,
'progressTip': 'Among total {} hosts, {} successfully replaced and {} are pending'.format(
succeeded, succeeded, 0),
'successRatePercentage': progress_rate,
'successRate': '{}% ({}/{})'.format(progress_rate, succeeded, succeeded),
'description': msg
}
else:
# failed
succeeded = num_finished_host_ids
progress_rate = succeeded * 100 / current_capacity
msg = event.get('error_message', '')
return {
'id': event.get('id'),
'state': state,
'status': status,
'startDate': event.get('start_time'),
'lastUpdateDate': event.get('last_worked_on'),
'progressType': progress_type,
'progressTip': 'Among total {} hosts, {} successfully replaced and {} are pending. Reason: {}'.format(
current_capacity, succeeded, current_capacity - succeeded, msg),
'successRatePercentage': progress_rate,
'successRate': '{}% ({}/{})'.format(progress_rate, succeeded, current_capacity),
'description': msg
}
else:
# on-going event
replaced_and_succeeded_hosts = groups_helper.get_replaced_and_good_hosts(
request, cluster_name)
succeeded = len(replaced_and_succeeded_hosts)
progress_rate = succeeded * 100 / current_capacity
# it's not necessarily an error message
on_going_msg = event.get('error_message')
return {
'id': event.get('id'),
'state': state,
'status': status,
'startDate': event.get('start_time'),
'lastUpdateDate': event.get('last_worked_on'),
'progressType': progress_type,
'progressTip': 'Among total {} hosts, {} successfully replaced and {} are pending. {}'.format(
current_capacity, succeeded, current_capacity - succeeded, on_going_msg),
'successRatePercentage': progress_rate,
'successRate': '{}% ({}/{})'.format(progress_rate, succeeded, current_capacity)
}
def cluster_replacement_progress(request, name, stage):
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = '{}-{}'.format(name, stage)
replacement_event = clusters_helper.get_latest_cluster_replacement_progress(
request, cluster_name)
if not replacement_event:
log.info("There is no on-going replacement event for cluster %s." %
cluster_name)
return HttpResponse("There is no on-going replacement.")
# basic_cluster_info = clusters_helper.get_cluster(request, cluster_name)
# capacity = basic_cluster_info.get("capacity")
# should not respect the cluster capacity here, when min != max, the capacity is not a right number
asg_summary = autoscaling_groups_helper.get_autoscaling_summary(request, cluster_name)
desired_capacity = None
if asg_summary:
desired_capacity = asg_summary.get("desiredCapacity")
if not desired_capacity:
error_msg = "cluster %s has wrong desired_capacity: %s, asg_summary: %s" % \
(cluster_name, desired_capacity, asg_summary)
log.error(error_msg)
return HttpResponse(error_msg, status=500, content_type="application/json")
replacement_progress = get_replacement_summary(
request, cluster_name, replacement_event, desired_capacity)
html = render_to_string('clusters/replace_progress.tmpl', {
"env": env,
"replace_progress_report": replacement_progress
})
response = HttpResponse(html)
return response
def cluster_replacement_details(request, name, stage):
cluster_name = '{}-{}'.format(name, stage)
replacement_event = clusters_helper.get_latest_cluster_replacement_progress(
request, cluster_name)
if not replacement_event:
return HttpResponse("{}", content_type="application/json")
return HttpResponse(json.dumps(replacement_event), content_type="application/json")
def view_cluster_replacement_details(request, name, stage, replacement_id):
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = '{}-{}'.format(name, stage)
replacement_event = clusters_helper.get_cluster_replacement_info(
request, cluster_name, replacement_id)
if not replacement_event:
raise Exception("Replacement Id: %s Not Found.")
basic_cluster_info = clusters_helper.get_cluster(request, cluster_name)
capacity = basic_cluster_info.get("capacity")
replacement_details = get_replacement_summary(
request, cluster_name, replacement_event, capacity)
config_histories = clusters_helper.get_cluster_replacement_config_histories(
request, cluster_name, replacement_id)
return render(request, 'clusters/cluster_replace_details.html', {
"replace": replacement_details,
"config_histories": config_histories,
"env": env
})
def view_cluster_replacement_scaling_activities(request, name, stage):
cluster_name = '{}-{}'.format(name, stage)
scaling_activities = autoscaling_groups_helper.get_scaling_activities(
request, cluster_name, 20, '')
activities = json.dumps(scaling_activities["activities"])
return HttpResponse(activities, content_type="application/json")
def view_cluster_replacement_schedule(request, name, stage, replacement_id):
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = '{}-{}'.format(name, stage)
schedule = clusters_helper.get_cluster_replacement_schedule(
request, cluster_name, replacement_id)
return render(request, 'clusters/replace_schedule.html', {
"env": env,
"schedule": schedule
})
class ClusterHistoriesView(View):
def get(self, request, name, stage):
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = '{}-{}'.format(name, stage)
page_index = request.GET.get('index')
page_size = request.GET.get('size')
histories = clusters_helper.get_cluster_replacement_histories(
request, cluster_name, page_index, page_size)
replace_summaries = []
if histories:
basic_cluster_info = clusters_helper.get_cluster(
request, cluster_name)
capacity = basic_cluster_info.get("capacity")
for history in histories:
replace_summaries.append(get_replacement_summary(
request, cluster_name, history, capacity))
data = {
"env": env,
"replace_summaries": replace_summaries
}
return render(request, 'clusters/replace_histories.html', data)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import, print_function
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import logging
import re
import time
from datetime import datetime, timedelta
from dateutil.parser import parse as dateutil_parse
from sqlalchemy import Table, Column, Integer, String, Unicode, Date, DateTime, Time, or_, and_
from sqlalchemy.orm import relation
from sqlalchemy.schema import ForeignKey
from flexget import db_schema
from flexget import plugin
from flexget.event import event
from flexget.terminal import console
from flexget.manager import Session
from flexget.plugin import get_plugin_by_name
from flexget.utils import requests
from flexget.utils.database import with_session, json_synonym
from flexget.utils.simple_persistence import SimplePersistence
from flexget.utils.tools import TimedDict
Base = db_schema.versioned_base('api_trakt', 7)
AuthBase = db_schema.versioned_base('trakt_auth', 0)
log = logging.getLogger('api_trakt')
# Production Site
CLIENT_ID = '57e188bcb9750c79ed452e1674925bc6848bd126e02bb15350211be74c6547af'
CLIENT_SECRET = 'db4af7531e8df678b134dbc22445a2c04ebdbdd7213be7f5b6d17dfdfabfcdc2'
API_URL = 'https://api.trakt.tv/'
PIN_URL = 'http://trakt.tv/pin/346'
# Stores the last time we checked for updates for shows/movies
updated = SimplePersistence('api_trakt')
# Oauth account authentication
class TraktUserAuth(AuthBase):
__tablename__ = 'trakt_user_auth'
account = Column(Unicode, primary_key=True)
access_token = Column(Unicode)
refresh_token = Column(Unicode)
created = Column(DateTime)
expires = Column(DateTime)
def __init__(self, account, access_token, refresh_token, created, expires):
self.account = account
self.access_token = access_token
self.refresh_token = refresh_token
self.expires = token_expire_date(expires)
self.created = token_created_date(created)
def token_expire_date(expires):
return datetime.now() + timedelta(seconds=expires)
def token_created_date(created):
return datetime.fromtimestamp(created)
def device_auth():
data = {'client_id': CLIENT_ID}
try:
r = requests.post(get_api_url('oauth/device/code'), data=data).json()
device_code = r['device_code']
user_code = r['user_code']
expires_in = r['expires_in']
interval = r['interval']
console('Please visit {0} and authorize Flexget. Your user code is {1}. Your code expires in '
'{2} minutes.'.format(r['verification_url'], user_code, expires_in / 60.0))
log.debug('Polling for user authorization.')
data['code'] = device_code
data['client_secret'] = CLIENT_SECRET
end_time = time.time() + expires_in
console('Waiting...', end='')
# stop polling after expires_in seconds
while time.time() < end_time:
time.sleep(interval)
polling_request = requests.post(get_api_url('oauth/device/token'), data=data,
raise_status=False)
if polling_request.status_code == 200: # success
return polling_request.json()
elif polling_request.status_code == 400: # pending -- waiting for user
console('...', end='')
elif polling_request.status_code == 404: # not found -- invalid device_code
raise plugin.PluginError('Invalid device code. Open an issue on Github.')
elif polling_request.status_code == 409: # already used -- user already approved
raise plugin.PluginError('User code has already been approved.')
elif polling_request.status_code == 410: # expired -- restart process
break
elif polling_request.status_code == 418: # denied -- user denied code
raise plugin.PluginError('User code has been denied.')
elif polling_request.status_code == 429: # polling too fast
log.warning('Polling too quickly. Upping the interval. No action required.')
interval += 1
raise plugin.PluginError('User code has expired. Please try again.')
except requests.RequestException as e:
raise plugin.PluginError('Device authorization with Trakt.tv failed: {0}'.format(e))
def token_oauth(data):
try:
return requests.post(get_api_url('oauth/token'), data=data).json()
except requests.RequestException as e:
raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e))
def delete_account(account):
with Session() as session:
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
if not acc:
raise plugin.PluginError('Account %s not found.' % account)
session.delete(acc)
def get_access_token(account, token=None, refresh=False, re_auth=False, called_from_cli=False):
"""
Gets authorization info from a pin or refresh token.
:param account: Arbitrary account name to attach authorization to.
:param unicode token: The pin or refresh token, as supplied by the trakt website.
:param bool refresh: If True, refresh the access token using refresh_token from db.
:param bool re_auth: If True, account is re-authorized even if it already exists in db.
:raises RequestException: If there is a network error while authorizing.
"""
data = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob'
}
with Session() as session:
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
if acc and datetime.now() < acc.expires and not refresh and not re_auth:
return acc.access_token
else:
if acc and (refresh or datetime.now() >= acc.expires - timedelta(days=5)) and not re_auth:
log.debug('Using refresh token to re-authorize account %s.', account)
data['refresh_token'] = acc.refresh_token
data['grant_type'] = 'refresh_token'
token_dict = token_oauth(data)
elif token:
# We are only in here if a pin was specified, so it's safe to use console instead of logging
console('Warning: PIN authorization has been deprecated. Use Device Authorization instead.')
data['code'] = token
data['grant_type'] = 'authorization_code'
token_dict = token_oauth(data)
elif called_from_cli:
log.debug('No pin specified for an unknown account %s. Attempting to authorize device.', account)
token_dict = device_auth()
else:
raise plugin.PluginError('Account %s has not been authorized. See `flexget trakt auth -h` for how to do so.' %
account)
try:
new_acc = TraktUserAuth(account, token_dict['access_token'], token_dict['refresh_token'],
token_dict.get('created_at', time.time()), token_dict['expires_in'])
session.merge(new_acc)
return new_acc.access_token
except requests.RequestException as e:
raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e))
def make_list_slug(name):
"""Return the slug for use in url for given list name."""
slug = name.lower()
# These characters are just stripped in the url
for char in '!@#$%^*()[]{}/=?+\\|':
slug = slug.replace(char, '')
# These characters get replaced
slug = slug.replace('&', 'and')
slug = slug.replace(' ', '-')
return slug
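# e.g. make_list_slug('My *Favorite* Shows & Movies!') == 'my-favorite-shows-and-movies'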
def get_session(account=None, token=None):
"""
Creates a requests session ready to talk to trakt API with FlexGet's api key.
Can also add user level authentication if `account` parameter is given.
:param account: An account authorized via `flexget trakt auth` CLI command. If given, returned session will be
authenticated for that account.
"""
# base session carries FlexGet's API key; user-level auth is added below when an account is given
session = requests.Session()
session.headers = {
'Content-Type': 'application/json',
'trakt-api-version': '2',
'trakt-api-key': CLIENT_ID,
}
if account:
access_token = get_access_token(account, token)
if access_token:
session.headers.update({'Authorization': 'Bearer %s' % access_token})
return session
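# Typical usage (the account name is illustrative):
#   session = get_session(account='my_trakt_account')
#   watchlist = session.get(get_api_url('sync', 'watchlist', 'shows')).json()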
def get_api_url(*endpoint):
"""
Get the address of a trakt API endpoint.
:param endpoint: Can be a string endpoint (e.g. 'sync/watchlist') or an iterable (e.g. ('sync', 'watchlist')).
Multiple parameters can also be specified instead of a single iterable.
:returns: The absolute url to the specified API endpoint.
"""
if len(endpoint) == 1 and not isinstance(endpoint[0], basestring):
endpoint = endpoint[0]
# Make sure integer portions are turned into strings first too
url = API_URL + '/'.join(map(str, endpoint))
return url
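# e.g. get_api_url('shows', 1390, 'seasons', 1) and get_api_url(('shows', 1390, 'seasons', 1))
# both return 'https://api.trakt.tv/shows/1390/seasons/1'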
@db_schema.upgrade('api_trakt')
def upgrade(ver, session):
if ver is None or ver <= 6:
raise db_schema.UpgradeImpossible
return ver
def get_entry_ids(entry):
"""Creates a trakt ids dict from id fields on an entry. Prefers already populated info over lazy lookups."""
ids = {}
for lazy in [False, True]:
if entry.get('trakt_movie_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_movie_id']
elif entry.get('trakt_show_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_show_id']
elif entry.get('trakt_episode_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_episode_id']
if entry.get('tmdb_id', eval_lazy=lazy):
ids['tmdb'] = entry['tmdb_id']
if entry.get('tvdb_id', eval_lazy=lazy):
ids['tvdb'] = entry['tvdb_id']
if entry.get('imdb_id', eval_lazy=lazy):
ids['imdb'] = entry['imdb_id']
if entry.get('tvrage_id', eval_lazy=lazy):
ids['tvrage'] = entry['tvrage_id']
if ids:
break
return ids
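# e.g. an entry carrying trakt_show_id=1390 and tvdb_id=121361 (illustrative values)
# yields {'trakt': 1390, 'tvdb': 121361}; lazy fields are only evaluated on the
# second pass, when no eagerly populated ids were found.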
class TraktMovieTranslation(Base):
__tablename__ = 'trakt_movie_translations'
id = Column(Integer, primary_key=True, autoincrement=True)
language = Column(Unicode)
overview = Column(Unicode)
tagline = Column(Unicode)
title = Column(Unicode)
movie_id = Column(Integer, ForeignKey('trakt_movies.id'))
def __init__(self, translation, session):
super(TraktMovieTranslation, self).__init__()
self.update(translation, session)
def update(self, translation, session):
for col in translation.keys():
setattr(self, col, translation.get(col))
class TraktShowTranslation(Base):
__tablename__ = 'trakt_show_translations'
id = Column(Integer, primary_key=True, autoincrement=True)
language = Column(Unicode)
overview = Column(Unicode)
title = Column(Unicode)
show_id = Column(Integer, ForeignKey('trakt_shows.id'))
def __init__(self, translation, session):
super(TraktShowTranslation, self).__init__()
self.update(translation, session)
def update(self, translation, session):
for col in translation.keys():
setattr(self, col, translation.get(col))
def get_translations(ident, style):
url = get_api_url(style + 's', ident, 'translations')
trakt_translation = TraktShowTranslation if style == 'show' else TraktMovieTranslation
trakt_translation_id = getattr(trakt_translation, style + '_id')
translations = []
req_session = get_session()
try:
results = req_session.get(url, params={'extended': 'full'}).json()
with Session() as session:
for result in results:
translation = session.query(trakt_translation).filter(and_(
trakt_translation.language == result.get('language'),
trakt_translation_id == ident)).first()
if not translation:
translation = trakt_translation(result, session)
translations.append(translation)
return translations
except requests.RequestException as e:
log.debug('Error adding translations to trakt id %s: %s', ident, e)
class TraktGenre(Base):
__tablename__ = 'trakt_genres'
name = Column(Unicode, primary_key=True)
show_genres_table = Table('trakt_show_genres', Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('genre_id', Unicode, ForeignKey('trakt_genres.name')))
Base.register_table(show_genres_table)
movie_genres_table = Table('trakt_movie_genres', Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('genre_id', Unicode, ForeignKey('trakt_genres.name')))
Base.register_table(movie_genres_table)
class TraktActor(Base):
__tablename__ = 'trakt_actors'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode)
slug = Column(Unicode)
tmdb = Column(Integer)
imdb = Column(Unicode)
biography = Column(Unicode)
birthday = Column(Date)
death = Column(Date)
homepage = Column(Unicode)
def __init__(self, actor, session):
super(TraktActor, self).__init__()
self.update(actor, session)
def update(self, actor, session):
if self.id and self.id != actor.get('ids').get('trakt'):
raise Exception('Tried to update db actors with different actor data')
elif not self.id:
self.id = actor.get('ids').get('trakt')
self.name = actor.get('name')
ids = actor.get('ids')
self.imdb = ids.get('imdb')
self.slug = ids.get('slug')
self.tmdb = ids.get('tmdb')
self.biography = actor.get('biography')
if actor.get('birthday'):
self.birthday = dateutil_parse(actor.get('birthday'))
if actor.get('death'):
self.death = dateutil_parse(actor.get('death'))
self.homepage = actor.get('homepage')
def to_dict(self):
return {
'name': self.name,
'trakt_id': self.id,
'imdb_id': self.imdb,
'tmdb_id': self.tmdb,
}
show_actors_table = Table('trakt_show_actors', Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(show_actors_table)
movie_actors_table = Table('trakt_movie_actors', Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(movie_actors_table)
def get_db_actors(ident, style):
actors = []
url = get_api_url(style + 's', ident, 'people')
req_session = get_session()
try:
results = req_session.get(url, params={'extended': 'full'}).json()
with Session() as session:
for result in results.get('cast'):
trakt_id = result.get('person').get('ids').get('trakt')
actor = session.query(TraktActor).filter(TraktActor.id == trakt_id).first()
if not actor:
actor = TraktActor(result.get('person'), session)
actors.append(actor)
return actors
except requests.RequestException as e:
log.debug('Error searching for actors for trakt id %s: %s', ident, e)
return
def get_translations_dict(translate, style):
res = {}
for lang in translate:
info = {
'overview': lang.overview,
'title': lang.title,
}
if style == 'movie':
info['tagline'] = lang.tagline
res[lang.language] = info
return res
def list_actors(actors):
res = {}
for actor in actors:
info = {
'trakt_id': actor.id,
'name': actor.name,
'imdb_id': str(actor.imdb),
'trakt_slug': actor.slug,
'tmdb_id': str(actor.tmdb),
'birthday': actor.birthday.strftime("%Y/%m/%d") if actor.birthday else None,
'biography': actor.biography,
'homepage': actor.homepage,
'death': actor.death.strftime("%Y/%m/%d") if actor.death else None,
}
res[str(actor.id)] = info
return res
class TraktEpisode(Base):
__tablename__ = 'trakt_episodes'
id = Column(Integer, primary_key=True, autoincrement=False)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
title = Column(Unicode)
season = Column(Integer)
number = Column(Integer)
number_abs = Column(Integer)
overview = Column(Unicode)
first_aired = Column(DateTime)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)
def __init__(self, trakt_episode, session):
super(TraktEpisode, self).__init__()
self.update(trakt_episode, session)
def update(self, trakt_episode, session):
"""Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
if self.id and self.id != trakt_episode['ids']['trakt']:
raise Exception('Tried to update db ep with different ep data')
elif not self.id:
self.id = trakt_episode['ids']['trakt']
self.imdb_id = trakt_episode['ids']['imdb']
self.tmdb_id = trakt_episode['ids']['tmdb']
self.tvrage_id = trakt_episode['ids']['tvrage']
self.tvdb_id = trakt_episode['ids']['tvdb']
self.first_aired = None
if trakt_episode.get('first_aired'):
self.first_aired = dateutil_parse(trakt_episode['first_aired'], ignoretz=True)
self.updated_at = dateutil_parse(trakt_episode.get('updated_at'), ignoretz=True)
self.cached_at = datetime.now()
for col in ['title', 'season', 'number', 'number_abs', 'overview']:
setattr(self, col, trakt_episode.get(col))
@property
def expired(self):
# TODO should episode have its own expiration function?
return False
class TraktShow(Base):
__tablename__ = 'trakt_shows'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
overview = Column(Unicode)
first_aired = Column(DateTime)
air_day = Column(Unicode)
air_time = Column(Time)
timezone = Column(Unicode)
runtime = Column(Integer)
certification = Column(Unicode)
network = Column(Unicode)
country = Column(Unicode)
status = Column(String)
rating = Column(Integer)
votes = Column(Integer)
language = Column(Unicode)
homepage = Column(Unicode)
trailer = Column(Unicode)
aired_episodes = Column(Integer)
_translations = relation(TraktShowTranslation)
_translation_languages = Column('translation_languages', Unicode)
translation_languages = json_synonym('_translation_languages')
episodes = relation(TraktEpisode, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic')
genres = relation(TraktGenre, secondary=show_genres_table)
_actors = relation(TraktActor, secondary=show_actors_table)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"year": self.year,
"slug": self.slug,
"tvdb_id": self.tvdb_id,
"imdb_id": self.imdb_id,
"tmdb_id": self.tmdb_id,
"tvrage_id": self.tvrage_id,
"overview": self.overview,
"first_aired": self.first_aired,
"air_day": self.air_day,
"air_time": self.air_time.strftime("%H:%M") if self.air_time else None,
"timezone": self.timezone,
"runtime": self.runtime,
"certification": self.certification,
"network": self.network,
"country": self.country,
"status": self.status,
"rating": self.rating,
"votes": self.votes,
"language": self.language,
"homepage": self.homepage,
"number_of_aired_episodes": self.aired_episodes,
"genres": [g.name for g in self.genres],
"updated_at": self.updated_at,
"cached_at": self.cached_at
}
def __init__(self, trakt_show, session):
super(TraktShow, self).__init__()
self.update(trakt_show, session)
def update(self, trakt_show, session):
"""Updates this record from the trakt media object `trakt_show` returned by the trakt api."""
if self.id and self.id != trakt_show['ids']['trakt']:
raise Exception('Tried to update db show with different show data')
elif not self.id:
self.id = trakt_show['ids']['trakt']
self.slug = trakt_show['ids']['slug']
self.imdb_id = trakt_show['ids']['imdb']
self.tmdb_id = trakt_show['ids']['tmdb']
self.tvrage_id = trakt_show['ids']['tvrage']
self.tvdb_id = trakt_show['ids']['tvdb']
if trakt_show.get('airs'):
airs = trakt_show.get('airs')
self.air_day = airs.get('day')
self.timezone = airs.get('timezone')
if airs.get('time'):
self.air_time = datetime.strptime(airs.get('time'), '%H:%M').time()
else:
self.air_time = None
if trakt_show.get('first_aired'):
self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True)
else:
self.first_aired = None
self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True)
for col in ['overview', 'runtime', 'rating', 'votes', 'language', 'title', 'year',
'certification', 'network', 'country', 'status', 'aired_episodes',
'trailer', 'homepage']:
setattr(self, col, trakt_show.get(col))
# Sometimes genres and translations are None but we really do want a list, hence the "or []"
self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_show.get('genres') or []]
self.cached_at = datetime.now()
self.translation_languages = trakt_show.get('available_translations') or []
def get_episode(self, season, number, session, only_cached=False):
# TODO: Does series data being expired mean all episode data should be refreshed?
episode = self.episodes.filter(TraktEpisode.season == season).filter(TraktEpisode.number == number).first()
if not episode or self.expired:
url = get_api_url('shows', self.id, 'seasons', season, 'episodes', number, '?extended=full')
if only_cached:
raise LookupError('Episode %s %s not found in cache' % (season, number))
log.debug('Episode %s %s not found in cache, looking up from trakt.', season, number)
try:
ses = get_session()
data = ses.get(url).json()
except requests.RequestException:
raise LookupError('Error Retrieving Trakt url: %s' % url)
if not data:
raise LookupError('No data in response from trakt %s' % url)
episode = self.episodes.filter(TraktEpisode.id == data['ids']['trakt']).first()
if episode:
episode.update(data, session)
else:
episode = TraktEpisode(data, session)
self.episodes.append(episode)
return episode
@property
def expired(self):
"""
:return: True if show details are considered to be expired, i.e. in need of an update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
if self.cached_at is None:
log.debug('cached_at is None: %s', self)
return True
refresh_interval = 2
# if show has been cancelled or ended, then it is unlikely to be updated often
if self.year and (self.status == 'ended' or self.status == 'canceled'):
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
log.debug('show `%s` age %i expires in %i days', self.title, age, refresh_interval)
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
@property
def translations(self):
if not self._translations:
self._translations = get_translations(self.id, 'show')
return self._translations
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'show')
return self._actors
def __repr__(self):
return '<name=%s, id=%s>' % (self.title, self.id)
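# Standalone sketch of the cache-expiry heuristic used above: TraktShow.expired adds 5 days of
# refresh interval per year of age for ended/cancelled shows (TraktMovie.expired below applies
# the same age-based rule without the status check). `_expiry_interval_days` is a hypothetical
# helper for illustration only, not part of this plugin's API.
def _expiry_interval_days(year, ended=False, base_days=2, now_year=None):
    from datetime import datetime as _datetime
    now_year = now_year or _datetime.now().year
    interval = base_days
    if year and ended:
        age = max(now_year - year, 0)
        interval += age * 5
    return interval
# Example: a show that ended in 2010, checked in 2024, expires 2 + 14 * 5 = 72 days after caching.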
class TraktMovie(Base):
__tablename__ = 'trakt_movies'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tagline = Column(Unicode)
overview = Column(Unicode)
released = Column(Date)
runtime = Column(Integer)
rating = Column(Integer)
votes = Column(Integer)
trailer = Column(Unicode)
homepage = Column(Unicode)
language = Column(Unicode)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
_translations = relation(TraktMovieTranslation, backref='movie')
_translation_languages = Column('translation_languages', Unicode)
translation_languages = json_synonym('_translation_languages')
genres = relation(TraktGenre, secondary=movie_genres_table)
_actors = relation(TraktActor, secondary=movie_actors_table)
def __init__(self, trakt_movie, session):
super(TraktMovie, self).__init__()
self.update(trakt_movie, session)
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"year": self.year,
"slug": self.slug,
"imdb_id": self.imdb_id,
"tmdb_id": self.tmdb_id,
"tagline": self.tagline,
"overview": self.overview,
"released": self.released,
"runtime": self.runtime,
"rating": self.rating,
"votes": self.votes,
"language": self.language,
"homepage": self.homepage,
"trailer": self.trailer,
"genres": [g.name for g in self.genres],
"updated_at": self.updated_at,
"cached_at": self.cached_at
}
def update(self, trakt_movie, session):
"""Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
if self.id and self.id != trakt_movie['ids']['trakt']:
raise Exception('Tried to update db movie with different movie data')
elif not self.id:
self.id = trakt_movie['ids']['trakt']
self.slug = trakt_movie['ids']['slug']
self.imdb_id = trakt_movie['ids']['imdb']
self.tmdb_id = trakt_movie['ids']['tmdb']
for col in ['title', 'overview', 'runtime', 'rating', 'votes',
'language', 'tagline', 'year', 'trailer', 'homepage']:
setattr(self, col, trakt_movie.get(col))
if trakt_movie.get('released'):
self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True).date()
self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True)
self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_movie.get('genres', [])]
self.cached_at = datetime.now()
self.translation_languages = trakt_movie.get('available_translations', [])
@property
def expired(self):
"""
        :return: True if movie details are considered to be expired, i.e. in need of an update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
        if self.cached_at is None:
            log.debug('cached_at is None: %s', self)
return True
refresh_interval = 2
if self.year:
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
log.debug('movie `%s` age %i expires in %i days', self.title, age, refresh_interval)
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
@property
def translations(self):
if not self._translations:
self._translations = get_translations(self.id, 'movie')
return self._translations
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'movie')
return self._actors
class TraktShowSearchResult(Base):
__tablename__ = 'trakt_show_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=True)
series = relation(TraktShow, backref='search_strings')
def __init__(self, search, series_id=None, series=None):
self.search = search.lower()
if series_id:
self.series_id = series_id
if series:
self.series = series
class TraktMovieSearchResult(Base):
__tablename__ = 'trakt_movie_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
movie_id = Column(Integer, ForeignKey('trakt_movies.id'), nullable=True)
movie = relation(TraktMovie, backref='search_strings')
def __init__(self, search, movie_id=None, movie=None):
self.search = search.lower()
if movie_id:
self.movie_id = movie_id
if movie:
self.movie = movie
def split_title_year(title):
"""Splits title containing a year into a title, year pair."""
# We only recognize years from the 2nd and 3rd millennium, FlexGetters from the year 3000 be damned!
match = re.search(r'[\s(]([12]\d{3})\)?$', title)
if match:
title = title[:match.start()].strip()
year = int(match.group(1))
else:
year = None
return title, year
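# Illustrative check of split_title_year() above; `_demo_split_title_year` is a hypothetical
# helper kept as a function so that importing this module does not execute it.
def _demo_split_title_year():
    assert split_title_year('The Wire (2002)') == ('The Wire', 2002)
    assert split_title_year('Fargo 1996') == ('Fargo', 1996)
    assert split_title_year('Alien') == ('Alien', None)  # no trailing year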
@with_session
def get_cached(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
tvdb_id=None, tvrage_id=None, session=None):
"""
Get the cached info for a given show/movie from the database.
    :param style: Either 'show' or 'movie'
"""
ids = {
'id': trakt_id,
'slug': trakt_slug,
'tmdb_id': tmdb_id,
'imdb_id': imdb_id,
}
if style == 'show':
ids['tvdb_id'] = tvdb_id
ids['tvrage_id'] = tvrage_id
model = TraktShow
else:
model = TraktMovie
result = None
if any(ids.values()):
result = session.query(model).filter(
or_(getattr(model, col) == val for col, val in ids.items() if val)).first()
elif title:
title, y = split_title_year(title)
year = year or y
query = session.query(model).filter(model.title == title)
if year:
query = query.filter(model.year == year)
result = query.first()
return result
def get_trakt(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
tvdb_id=None, tvrage_id=None):
"""Returns the matching media object from trakt api."""
# TODO: Better error messages
# Trakt api accepts either id or slug (there is a rare possibility for conflict though, e.g. 24)
trakt_id = trakt_id or trakt_slug
if not any([title, trakt_id, tmdb_id, imdb_id, tvdb_id, tvrage_id]):
raise LookupError('No lookup arguments provided.')
req_session = get_session()
last_search_query = None # used if no results are found
last_search_type = None
if not trakt_id:
# Try finding trakt_id based on other ids
ids = {
'imdb': imdb_id,
'tmdb': tmdb_id
}
if style == 'show':
ids['tvdb'] = tvdb_id
ids['tvrage'] = tvrage_id
for id_type, identifier in ids.items():
if not identifier:
continue
try:
last_search_query = identifier
last_search_type = id_type
log.debug('Searching with params: %s=%s', id_type, identifier)
results = req_session.get(get_api_url('search'), params={'id_type': id_type, 'id': identifier}).json()
except requests.RequestException as e:
raise LookupError('Searching trakt for %s=%s failed with error: %s' % (id_type, identifier, e))
for result in results:
if result['type'] != style:
continue
trakt_id = result[style]['ids']['trakt']
break
if not trakt_id and title:
last_search_query = title
last_search_type = 'title'
# Try finding trakt id based on title and year
if style == 'show':
parsed_title, y = split_title_year(title)
y = year or y
else:
title_parser = get_plugin_by_name('parsing').instance.parse_movie(title)
y = year or title_parser.year
parsed_title = title_parser.name
try:
params = {'query': parsed_title, 'type': style, 'year': y}
log.debug('Type of title: %s', type(parsed_title))
log.debug('Searching with params: %s', ', '.join('{}={}'.format(k, v) for (k, v) in params.items()))
results = req_session.get(get_api_url('search'), params=params).json()
except requests.RequestException as e:
raise LookupError('Searching trakt for %s failed with error: %s' % (title, e))
for result in results:
if year and result[style]['year'] != year:
continue
if parsed_title.lower() == result[style]['title'].lower():
trakt_id = result[style]['ids']['trakt']
break
# grab the first result if there is no exact match
if not trakt_id and results:
trakt_id = results[0][style]['ids']['trakt']
if not trakt_id:
raise LookupError('Unable to find %s="%s" on trakt.' % (last_search_type, last_search_query))
# Get actual data from trakt
try:
return req_session.get(get_api_url(style + 's', trakt_id), params={'extended': 'full'}).json()
except requests.RequestException as e:
raise LookupError('Error getting trakt data for id %s: %s' % (trakt_id, e))
def update_collection_cache(style_ident, username=None, account=None):
if account and not username:
username = 'me'
url = get_api_url('users', username, 'collection', style_ident)
session = get_session(account=account)
try:
data = session.get(url).json()
if not data:
log.warning('No collection data returned from trakt.')
return
cache = get_user_cache(username=username, account=account)['collection'][style_ident]
log.verbose('Received %d records from trakt.tv %s\'s collection', len(data), username)
if style_ident == 'movies':
for movie in data:
movie_id = movie['movie']['ids']['trakt']
cache[movie_id] = movie['movie']
cache[movie_id]['collected_at'] = dateutil_parse(movie['collected_at'], ignoretz=True)
else:
for series in data:
series_id = series['show']['ids']['trakt']
cache[series_id] = series['show']
cache[series_id]['seasons'] = series['seasons']
cache[series_id]['collected_at'] = dateutil_parse(series['last_collected_at'], ignoretz=True)
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def update_watched_cache(style_ident, username=None, account=None):
if account and not username:
username = 'me'
url = get_api_url('users', username, 'watched', style_ident)
session = get_session(account=account)
try:
data = session.get(url).json()
if not data:
log.warning('No watched data returned from trakt.')
return
cache = get_user_cache(username=username, account=account)['watched'][style_ident]
log.verbose('Received %d record(s) from trakt.tv %s\'s watched history', len(data), username)
if style_ident == 'movies':
for movie in data:
movie_id = movie['movie']['ids']['trakt']
cache[movie_id] = movie['movie']
cache[movie_id]['watched_at'] = dateutil_parse(movie['last_watched_at'], ignoretz=True)
cache[movie_id]['plays'] = movie['plays']
else:
for series in data:
series_id = series['show']['ids']['trakt']
cache[series_id] = series['show']
cache[series_id]['seasons'] = series['seasons']
cache[series_id]['watched_at'] = dateutil_parse(series['last_watched_at'], ignoretz=True)
cache[series_id]['plays'] = series['plays']
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def update_user_ratings_cache(style_ident, username=None, account=None):
if account and not username:
username = 'me'
url = get_api_url('users', username, 'ratings', style_ident)
session = get_session(account=account)
try:
data = session.get(url).json()
if not data:
log.warning('No user ratings data returned from trakt.')
return
cache = get_user_cache(username=username, account=account)['user_ratings']
log.verbose('Received %d record(s) from trakt.tv %s\'s %s user ratings', len(data), username, style_ident)
for item in data:
# get the proper cache from the type returned by trakt
item_type = item['type']
item_cache = cache[item_type + 's']
            # seasons cannot simply be stored under 'shows' without complicating cache retrieval
            # later, so season ratings get their own cache keyed by series id, then season number:
            # e.g. cache['seasons'][<show_id>][<season_number>] = season data plus 'rating'/'rated_at'
if item_type == 'season':
show_id = item['show']['ids']['trakt']
season = item['season']['number']
item_cache.setdefault(show_id, {})
item_cache[show_id].setdefault(season, {})
item_cache = item_cache[show_id]
item_id = season
else:
item_id = item[item_type]['ids']['trakt']
item_cache[item_id] = item[item_type]
item_cache[item_id]['rated_at'] = dateutil_parse(item['rated_at'], ignoretz=True)
item_cache[item_id]['rating'] = item['rating']
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
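# Sketch of the nested user-ratings cache shape described above (ids are made up):
# shows/movies/episodes are keyed directly by trakt id, while season ratings are keyed
# by series id and then by season number.
def _example_user_ratings_cache():
    return {
        'shows': {1390: {'rating': 9, 'rated_at': None}},
        'seasons': {1390: {1: {'rating': 8, 'rated_at': None}}},
        'episodes': {73640: {'rating': 10, 'rated_at': None}},
        'movies': {12601: {'rating': 7, 'rated_at': None}},
    }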
def get_user_cache(username=None, account=None):
identifier = '{}|{}'.format(account, username or 'me')
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('watched', {}).setdefault('shows', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('watched', {}).setdefault('movies', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('collection', {}).setdefault('shows', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('collection', {}).setdefault('movies', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('user_ratings', {}).setdefault('shows', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('user_ratings', {}).setdefault('seasons', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('user_ratings', {}).setdefault('episodes', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('user_ratings', {}).setdefault('movies', {})
return ApiTrakt.user_cache[identifier]
class ApiTrakt(object):
user_cache = TimedDict(cache_time='15 minutes')
@staticmethod
@with_session
def lookup_series(session=None, only_cached=None, **lookup_params):
series = get_cached('show', session=session, **lookup_params)
title = lookup_params.get('title') or ''
found = None
if not series and title:
found = session.query(TraktShowSearchResult).filter(TraktShowSearchResult.search == title.lower()).first()
if found and found.series:
log.debug('Found %s in previous search results as %s', title, found.series.title)
series = found.series
if only_cached:
if series:
return series
raise LookupError('Series %s not found from cache' % lookup_params)
if series and not series.expired:
return series
try:
trakt_show = get_trakt('show', **lookup_params)
except LookupError as e:
if series:
log.debug('Error refreshing show data from trakt, using cached. %s', e)
return series
raise
series = session.merge(TraktShow(trakt_show, session))
if series and title.lower() == series.title.lower():
return series
elif series and title and not found:
if not session.query(TraktShowSearchResult).filter(TraktShowSearchResult.search == title.lower()).first():
log.debug('Adding search result to db')
session.merge(TraktShowSearchResult(search=title, series=series))
elif series and found:
log.debug('Updating search result in db')
found.series = series
return series
@staticmethod
@with_session
def lookup_movie(session=None, only_cached=None, **lookup_params):
movie = get_cached('movie', session=session, **lookup_params)
title = lookup_params.get('title') or ''
found = None
if not movie and title:
found = session.query(TraktMovieSearchResult).filter(TraktMovieSearchResult.search == title.lower()).first()
if found and found.movie:
log.debug('Found %s in previous search results as %s', title, found.movie.title)
movie = found.movie
if only_cached:
if movie:
return movie
raise LookupError('Movie %s not found from cache' % lookup_params)
if movie and not movie.expired:
return movie
try:
trakt_movie = get_trakt('movie', **lookup_params)
except LookupError as e:
if movie:
log.debug('Error refreshing movie data from trakt, using cached. %s', e)
return movie
raise
movie = session.merge(TraktMovie(trakt_movie, session))
if movie and title.lower() == movie.title.lower():
return movie
if movie and title and not found:
if not session.query(TraktMovieSearchResult).filter(TraktMovieSearchResult.search == title.lower()).first():
log.debug('Adding search result to db')
session.merge(TraktMovieSearchResult(search=title, movie=movie))
elif movie and found:
log.debug('Updating search result in db')
found.movie = movie
return movie
@staticmethod
def collected(style, trakt_data, title, username=None, account=None):
style_ident = 'movies' if style == 'movie' else 'shows'
cache = get_user_cache(username=username, account=account)
if not cache['collection'][style_ident]:
log.debug('No collection found in cache.')
update_collection_cache(style_ident, username=username, account=account)
if not cache['collection'][style_ident]:
log.warning('No collection data returned from trakt.')
return
in_collection = False
cache = cache['collection'][style_ident]
if style == 'show':
if trakt_data.id in cache:
series = cache[trakt_data.id]
# specials are not included
number_of_collected_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0)
in_collection = number_of_collected_episodes >= trakt_data.aired_episodes
elif style == 'episode':
if trakt_data.show.id in cache:
series = cache[trakt_data.show.id]
for s in series['seasons']:
if s['number'] == trakt_data.season:
# extract all episode numbers currently in collection for the season number
episodes = [ep['number'] for ep in s['episodes']]
in_collection = trakt_data.number in episodes
break
else:
if trakt_data.id in cache:
in_collection = True
log.debug('The result for entry "%s" is: %s', title,
'Owned' if in_collection else 'Not owned')
return in_collection
@staticmethod
def watched(style, trakt_data, title, username=None, account=None):
style_ident = 'movies' if style == 'movie' else 'shows'
cache = get_user_cache(username=username, account=account)
if not cache['watched'][style_ident]:
log.debug('No watched history found in cache.')
update_watched_cache(style_ident, username=username, account=account)
if not cache['watched'][style_ident]:
log.warning('No watched data returned from trakt.')
return
watched = False
cache = cache['watched'][style_ident]
if style == 'show':
if trakt_data.id in cache:
series = cache[trakt_data.id]
# specials are not included
number_of_watched_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0)
watched = number_of_watched_episodes == trakt_data.aired_episodes
elif style == 'episode':
if trakt_data.show.id in cache:
series = cache[trakt_data.show.id]
for s in series['seasons']:
if s['number'] == trakt_data.season:
# extract all episode numbers currently in collection for the season number
episodes = [ep['number'] for ep in s['episodes']]
watched = trakt_data.number in episodes
break
else:
if trakt_data.id in cache:
watched = True
log.debug('The result for entry "%s" is: %s', title,
'Watched' if watched else 'Not watched')
return watched
@staticmethod
def user_ratings(style, trakt_data, title, username=None, account=None):
style_ident = style + 's'
cache = get_user_cache(username=username, account=account)
if not cache['user_ratings'][style_ident]:
log.debug('No user ratings found in cache.')
update_user_ratings_cache(style_ident, username=username, account=account)
if not cache['user_ratings'][style_ident]:
log.warning('No user ratings data returned from trakt.')
return
user_rating = None
cache = cache['user_ratings'][style_ident]
        # season ratings are cached separately (keyed by series id), so they need their own lookup
if style == 'season' and trakt_data.series_id in cache:
if trakt_data.season in cache[trakt_data.series_id]:
user_rating = cache[trakt_data.series_id][trakt_data.season]['rating']
if trakt_data.id in cache:
user_rating = cache[trakt_data.id]['rating']
log.debug('User rating for entry "%s" is: %s', title, user_rating)
return user_rating
@event('plugin.register')
def register_plugin():
plugin.register(ApiTrakt, 'api_trakt', api_ver=2)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from openstackclient.common import utils
from openstackclient.tests import fakes
from openstackclient.tests.identity.v3 import fakes as identity_fakes
from openstackclient.tests import utils as tests_utils
from openstackclient.tests.volume.v2 import fakes as volume_fakes
from openstackclient.volume.v2 import volume_type
class TestType(volume_fakes.TestVolume):
def setUp(self):
super(TestType, self).setUp()
self.types_mock = self.app.client_manager.volume.volume_types
self.types_mock.reset_mock()
self.types_access_mock = (
self.app.client_manager.volume.volume_type_access)
self.types_access_mock.reset_mock()
self.projects_mock = self.app.client_manager.identity.projects
self.projects_mock.reset_mock()
class TestTypeCreate(TestType):
columns = (
'description',
'id',
'name',
)
def setUp(self):
super(TestTypeCreate, self).setUp()
self.new_volume_type = volume_fakes.FakeType.create_one_type()
self.data = (
self.new_volume_type.description,
self.new_volume_type.id,
self.new_volume_type.name,
)
self.types_mock.create.return_value = self.new_volume_type
# Get the command object to test
self.cmd = volume_type.CreateVolumeType(self.app, None)
def test_type_create_public(self):
arglist = [
"--description", self.new_volume_type.description,
"--public",
self.new_volume_type.name,
]
verifylist = [
("description", self.new_volume_type.description),
("public", True),
("private", False),
("name", self.new_volume_type.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.types_mock.create.assert_called_with(
self.new_volume_type.name,
description=self.new_volume_type.description,
is_public=True,
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_type_create_private(self):
arglist = [
"--description", self.new_volume_type.description,
"--private",
self.new_volume_type.name,
]
verifylist = [
("description", self.new_volume_type.description),
("public", False),
("private", True),
("name", self.new_volume_type.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.types_mock.create.assert_called_with(
self.new_volume_type.name,
description=self.new_volume_type.description,
is_public=False,
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
class TestTypeDelete(TestType):
volume_type = volume_fakes.FakeType.create_one_type()
def setUp(self):
super(TestTypeDelete, self).setUp()
self.types_mock.get.return_value = self.volume_type
self.types_mock.delete.return_value = None
        # Get the command object to test
self.cmd = volume_type.DeleteVolumeType(self.app, None)
def test_type_delete(self):
arglist = [
self.volume_type.id
]
verifylist = [
("volume_type", self.volume_type.id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.types_mock.delete.assert_called_with(self.volume_type.id)
self.assertIsNone(result)
class TestTypeList(TestType):
volume_types = volume_fakes.FakeType.create_types()
columns = [
"ID",
"Name"
]
columns_long = columns + [
"Description",
"Properties"
]
data = []
for t in volume_types:
data.append((
t.id,
t.name,
))
data_long = []
for t in volume_types:
data_long.append((
t.id,
t.name,
t.description,
utils.format_dict(t.extra_specs),
))
def setUp(self):
super(TestTypeList, self).setUp()
self.types_mock.list.return_value = self.volume_types
# get the command to test
self.cmd = volume_type.ListVolumeType(self.app, None)
def test_type_list_without_options(self):
arglist = []
verifylist = [
("long", False)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
def test_type_list_with_options(self):
arglist = ["--long"]
verifylist = [("long", True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns_long, columns)
self.assertEqual(self.data_long, list(data))
class TestTypeSet(TestType):
volume_type = volume_fakes.FakeType.create_one_type(
methods={'set_keys': None})
def setUp(self):
super(TestTypeSet, self).setUp()
self.types_mock.get.return_value = self.volume_type
# Return a project
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
# Get the command object to test
self.cmd = volume_type.SetVolumeType(self.app, None)
def test_type_set_name(self):
new_name = 'new_name'
arglist = [
'--name', new_name,
self.volume_type.id,
]
verifylist = [
('name', new_name),
('description', None),
('property', None),
('volume_type', self.volume_type.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'name': new_name,
}
self.types_mock.update.assert_called_with(
self.volume_type.id,
**kwargs
)
self.assertIsNone(result)
def test_type_set_description(self):
new_desc = 'new_desc'
arglist = [
'--description', new_desc,
self.volume_type.id,
]
verifylist = [
('name', None),
('description', new_desc),
('property', None),
('volume_type', self.volume_type.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'description': new_desc,
}
self.types_mock.update.assert_called_with(
self.volume_type.id,
**kwargs
)
self.assertIsNone(result)
def test_type_set_property(self):
arglist = [
'--property', 'myprop=myvalue',
self.volume_type.id,
]
verifylist = [
('name', None),
('description', None),
('property', {'myprop': 'myvalue'}),
('volume_type', self.volume_type.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.volume_type.set_keys.assert_called_once_with(
{'myprop': 'myvalue'})
self.assertIsNone(result)
def test_type_set_not_called_without_project_argument(self):
arglist = [
'--project', '',
self.volume_type.id,
]
verifylist = [
('project', ''),
('volume_type', self.volume_type.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.assertIsNone(result)
self.assertFalse(self.types_access_mock.add_project_access.called)
def test_type_set_failed_with_missing_volume_type_argument(self):
arglist = [
'--project', 'identity_fakes.project_id',
]
verifylist = [
('project', 'identity_fakes.project_id'),
]
self.assertRaises(tests_utils.ParserException,
self.check_parser,
self.cmd,
arglist,
verifylist)
def test_type_set_project_access(self):
arglist = [
'--project', identity_fakes.project_id,
self.volume_type.id,
]
verifylist = [
('project', identity_fakes.project_id),
('volume_type', self.volume_type.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.assertIsNone(result)
self.types_access_mock.add_project_access.assert_called_with(
self.volume_type.id,
identity_fakes.project_id,
)
class TestTypeShow(TestType):
columns = (
'description',
'id',
'name',
'properties',
)
def setUp(self):
super(TestTypeShow, self).setUp()
self.volume_type = volume_fakes.FakeType.create_one_type()
self.data = (
self.volume_type.description,
self.volume_type.id,
self.volume_type.name,
utils.format_dict(self.volume_type.extra_specs)
)
self.types_mock.get.return_value = self.volume_type
# Get the command object to test
self.cmd = volume_type.ShowVolumeType(self.app, None)
def test_type_show(self):
arglist = [
self.volume_type.id
]
verifylist = [
("volume_type", self.volume_type.id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.types_mock.get.assert_called_with(self.volume_type.id)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
class TestTypeUnset(TestType):
volume_type = volume_fakes.FakeType.create_one_type(
methods={'unset_keys': None})
def setUp(self):
super(TestTypeUnset, self).setUp()
self.types_mock.get.return_value = self.volume_type
# Return a project
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
# Get the command object to test
self.cmd = volume_type.UnsetVolumeType(self.app, None)
def test_type_unset(self):
arglist = [
'--property', 'property',
'--property', 'multi_property',
self.volume_type.id,
]
verifylist = [
('property', ['property', 'multi_property']),
('volume_type', self.volume_type.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.volume_type.unset_keys.assert_called_once_with(
['property', 'multi_property'])
self.assertIsNone(result)
def test_type_unset_project_access(self):
arglist = [
'--project', identity_fakes.project_id,
self.volume_type.id,
]
verifylist = [
('project', identity_fakes.project_id),
('volume_type', self.volume_type.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.assertIsNone(result)
self.types_access_mock.remove_project_access.assert_called_with(
self.volume_type.id,
identity_fakes.project_id,
)
def test_type_unset_not_called_without_project_argument(self):
arglist = [
'--project', '',
self.volume_type.id,
]
verifylist = [
('project', ''),
('volume_type', self.volume_type.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.assertIsNone(result)
self.assertFalse(self.types_access_mock.remove_project_access.called)
def test_type_unset_failed_with_missing_volume_type_argument(self):
arglist = [
'--project', 'identity_fakes.project_id',
]
verifylist = [
('project', 'identity_fakes.project_id'),
]
self.assertRaises(tests_utils.ParserException,
self.check_parser,
self.cmd,
arglist,
verifylist)
|
|
import os
import random
import cookielib
import re
import urllib2
import urllib
from mimetypes import MimeTypes
import urlparse
from ptdb import PtSqlite
from ptthread import PtWorkManager
from ptcore import PtCp,PtFile
from ptcore import hex_md5, pathHashDir, makeDir, rmDir, user_agents,\
error_log, trace_back
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_301(self, req, fp, code, msg, headers):
pass
def http_error_302(self, req, fp, code, msg, headers):
pass
class PtSpider(object):
def __init__(self):
self.http = PtUrllib()
        self.http.cache = True
self.file = PtFile()
self.cp = PtCp()
self.db = PtSqlite()
def md5(self,s):
return hex_md5(s)
def hash_dir(self,s,level = 3):
return pathHashDir(s,level)
def makeDir(self,filename):
makeDir(os.path.dirname(filename))
def rmDir(self,d):
rmDir(d)
class PtUrllib(object):
cache_dir = '.cache'
cookie_file = 'cookie.txt'
cache = False
user_agent = None
opener = None
debuglevel = 0
cookie_jar = None
redirect30x = True
proxy = None
def __init__(self):
#self.user_agent = random.choice(user_agents)
self.user_agent = random.choice(user_agents)
def getUrlCacheFile(self,url,level = 3):
return os.path.join(os.path.abspath(self.cache_dir),pathHashDir(url,level),hex_md5(url)+".txt")
def checkUrlCacheExits(self,url,level = 3):
path = self.getUrlCacheFile(url,level)
return os.path.isfile(path)
def delUrlCache(self,url,level = 3):
path = self.getUrlCacheFile(url,level)
if os.path.isfile(path):
os.remove(path)
def setOpener(self):
handlers = []
httpHandler = urllib2.HTTPHandler(debuglevel=self.debuglevel)
httpsHandler = urllib2.HTTPSHandler(debuglevel=self.debuglevel)
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
        try:
            self.cookie_jar.load(ignore_discard=True, ignore_expires=True)
        except Exception:
            # no usable cookie file yet, create one
            self.cookie_jar.save(self.cookie_file, ignore_discard=True, ignore_expires=True)
handlers.append(httpHandler)
handlers.append(httpsHandler)
handlers.append(urllib2.HTTPCookieProcessor(self.cookie_jar))
if self.proxy is not None:
#{'http':'http://XX.XX.XX.XX:XXXX'}
proxy_support = urllib2.ProxyHandler(self.proxy)
handlers.append(proxy_support)
        if not self.redirect30x:
handlers.append(NoRedirectHandler)
self.opener = urllib2.build_opener(*handlers)
def dom(self,r):
#return PyQuery(unicode(r,'utf-8'))
pass
#return PyQuery(r)
def wm(self,func,w_num,t_num):
wm = PtWorkManager(func,w_num,t_num)
wm.wait_allcomplete()
def find(self,p,s):
m = re.search(p,s)
if m:
return m.group(1)
else:
return ''
    def get(self, url, info={}, timeout=20):
        return self.urlopen(url, method="get", data={}, info=info, timeout=timeout)
    def post(self, url, data, info={}, timeout=20):
        return self.urlopen(url, method="post", data=data, info=info, timeout=timeout)
def urlopen(self,url,method = 'get',data = {},info = {},timeout = 30):
_url = ''
if method == "post":
query = urllib.urlencode(data)
_url = url
url = url + "?" + query
if self.cache:
if self.checkUrlCacheExits(url):
return self.getCacheContent(url)
if self.opener is None:
self.setOpener()
v = {}
for k in info:
v[k] = info[k]
v['url'] = url
v['local'] = self.getUrlCacheFile(url)
v['headers'] = ''
v['cache'] = False
v['body'] = ''
self.setUrlCache(url,v)
try:
if method == "get":
req = urllib2.Request(url)
else:
req = urllib2.Request(_url,query)
req.add_header("User-Agent", self.user_agent)
r = self.opener.open(req,timeout = timeout)
except urllib2.HTTPError, e:
self.delUrlCache(url)
error_log(url+"\n"+trace_back()+"\n")
return None
except Exception , e:
self.delUrlCache(url)
error_log(url+"\n"+trace_back()+"\n")
return None
self.saveCookie()
v['headers'] = dict(r.headers)
v['body'] = r.read()
self.setUrlCache(url,v)
r.close()
return v
def setUrlCache(self,url,v,level = 3):
#if self.cache == False:
# return
vv = {}
vv['url'] = v['url']
vv['headers'] = v['headers']
vv['cache'] = True
vv['body'] = v['body']
vv['local'] = v['local']
cp = PtCp()
path = self.getUrlCacheFile(url,level)
makeDir(os.path.dirname(path))
cp.dump(vv, path)
def saveCookie(self):
self.cookie_jar.save(self.cookie_file,ignore_discard=True, ignore_expires=True)
def getCacheContent(self,url):
cp = PtCp()
path = self.getUrlCacheFile(url)
return cp.load(path)
def getResponseUrl(self,response):
return response.geturl()
def getResponseLen(self,response):
return int(dict(response.headers).get('content-length', 0))
class PtCacheHandler():
host = ''
root_dir = "C:\\Users\\Joseph\\Desktop\\download"
def precess(self,buffer,url,header):
if buffer == '':
try:
buffer = urllib2.urlopen(url,None,5).read()
except Exception,e:
print e
self.save(buffer, url, header)
def parseHeader(self,rawHeaderPairs):
r = ''
for re in rawHeaderPairs:
if re[0] == 'Content-Type':
r= re[1]
if r and ";" in r:
r = r.split(';')[0]
return r
#print re[0],re[1]
def getMimeType(self,buffre,url,mtype):
if '?' in url:
url = url.split('?')[0]
mime = MimeTypes()
ext = os.path.splitext(url)[1]
if mtype == 'text/html' and ext == '':
if url[-1] == '/':
l = len(url)-1
url = url[0:-1]
url = url+'/index.html'
ext = '.html'
#ext1 = mime.guess_extension(mtype,True)
#print ext1
mime_type = mime.guess_type(url)
#print url
if ext:
#print url
u = urlparse.urlparse(url)
#print u.netloc,u.path
print self.host
if self.host:
root_dir = self.root_dir+"/"+self.host
file_path = os.path.join(root_dir,u.netloc+u.path)
print file_path
#if not os.path.isfile(file_path):
makeDir(os.path.dirname(file_path))
f = open(file_path,"wb")
f.write(buffre)
#print url,ext,mime_type
def save(self,buffre,url,header):
mime_type = self.parseHeader(header)
self.getMimeType(buffre,url,mime_type)
|
|
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import subprocess
import threading
import logging
import socket
import inspect
import tempfile
import stat
import shlex
import platform
from . import config
#from common import *
MODULEFILE = re.sub('\.py', '',
os.path.basename(inspect.stack()[0][1]))
DEBUG = config.DEBUG
STRICT = config.STRICT
def debugprint(log_level=None):
def dprint(lines=[], module=""):
if type(lines) is not list:
lines = [lines]
if len(module) != 0:
module = "[{}]".format(module)
if len(lines) == 0:
print("{}:".format(log_level), module, "-" * 40)
else:
for line in lines:
print("{}:".format(log_level), module, line)
return []
def noop(a=[], b=""):
return []
if log_level == "DEBUG" or DEBUG:
return dprint
else:
return noop
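# Example use of debugprint() above: it hands back a real printer when DEBUG is enabled
# (or when an explicit "DEBUG" level is requested) and a silent no-op otherwise, so call
# sites never need their own `if DEBUG:` guards. `_demo_debugprint` is illustrative only.
def _demo_debugprint():
    dp = debugprint("DEBUG")              # always returns the real printer
    dp("starting demo", "demo")           # -> DEBUG: [demo] starting demo
    dp(["line one", "line two"], "demo")  # one output line per list entry
    dp()                                  # prints a "DEBUG:" separator line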
def timeout(func, args=(), kwargs={},
timeout_duration=10, default=None, log=None):
"""This function will spawn a thread and run the given function
using the args, kwargs and return the given default value if the
timeout_duration is exceeded.
"""
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = default
def run(self):
self.result = func(*args, **kwargs)
try:
if log:
            log.info("Starting timeout thread for '{}', timeout in {}s".format(
                func.__name__, timeout_duration))
it = InterruptableThread()
it.start()
it.join(timeout_duration)
        # the worker thread cannot be killed; return whatever result is available
        # (still the default value if the call has not finished yet)
        return it.result
except:
if log:
            log.warning("Exception occurred in timeout thread for '{}'".format(
func.__name__))
return default
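# Minimal usage sketch for timeout() above; `_slow_op` is a made-up callable used purely
# for illustration. Note the worker thread is not killed on timeout, it just stops being
# waited on.
def _demo_timeout():
    import time

    def _slow_op(seconds):
        time.sleep(seconds)
        return "finished"

    # finishes well within the 2s budget, so the real return value comes back
    quick = timeout(_slow_op, args=(0.1,), timeout_duration=2, default="timed out")
    # still running when the join() deadline passes, so the default comes back
    slow = timeout(_slow_op, args=(5,), timeout_duration=1, default="timed out")
    return quick, slow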
def _execute(cmd, stdout=None, stderr=None, shell=False):
retval = None
if stderr is None:
stderr = stdout
try:
proc = subprocess.Popen(cmd,
universal_newlines=True,
stdout=stdout,
stderr=stderr,
shell=shell)
retval = proc.wait()
except Exception as _:
if STRICT:
raise
if DEBUG:
print("Exception has occurred in _execute(cmd, stdout, stderr)")
dp = debugprint()
dp("Error in executing '{}' exception is '{}'".
format(cmd, _), "_execute")
pass
return retval
def _read_unlink_handle(fh, fn):
retval = []
try:
with os.fdopen(fh) as fd:
fd.seek(0)
retval = fd.readlines()
except:
if STRICT:
raise
pass
finally:
try:
os.unlink(fn)
except:
pass
return retval
class Command(object):
def __init__(self, cmd, log=False, shell=False):
self.cmd = cmd
self.shell = shell
self.process = None
self.output = []
self.errput = []
self.errfile_handle, self.errfile_name = tempfile.mkstemp()
self.outfile_handle, self.outfile_name = tempfile.mkstemp()
def run(self, maxtime=30):
self.retval = timeout(_execute,
args=(self.cmd,
self.outfile_handle,
self.errfile_handle,
self.shell),
timeout_duration=maxtime)
self.output = _read_unlink_handle(self.outfile_handle,
self.outfile_name)
self.errput = _read_unlink_handle(self.errfile_handle,
self.errfile_name)
return self.retval
def run_command(args, split=True):
if isinstance(args, basestring):
if split:
args = shlex.split(args)
try:
if split:
prg = Command(args)
else:
prg = Command(args, shell=True)
prg.run()
except Exception as _:
if STRICT:
raise
if DEBUG:
dp = debugprint()
dp("Error in executing '{}' exception is '{}'".
format(args, _), "run_command")
rc = 1
out = "UnknownError"
err = "UnknownError"
else:
rc, out, err = prg.retval, ''.join(prg.output), ''.join(prg.errput)
return (rc, out, err)
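# Usage sketch for run_command() above (assumes a POSIX `echo` binary is on PATH): it
# returns a (return_code, stdout, stderr) tuple and, unless STRICT is set, folds any
# execution error into rc=1 with "UnknownError" output.
def _demo_run_command():
    rc, out, err = run_command("echo hello")
    # rc == 0, out == "hello\n", err == ""
    return rc, out, err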
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def get_bin_path(arg, required=False, opt_dirs=[]):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
path = os.path.join(d, arg)
if os.path.exists(path) and is_executable(path):
bin_path = path
break
return bin_path
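# Example use of get_bin_path() above; the exact result depends on the host, but `sh`
# exists on virtually every POSIX system and the second name should not exist anywhere.
def _demo_get_bin_path():
    assert get_bin_path('sh') is not None
    assert get_bin_path('definitely-not-a-real-binary') is None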
def pretty_bytes(size=0):
ranges = ((1 << 70L, 'ZB'),
(1 << 60L, 'EB'),
(1 << 50L, 'PB'),
(1 << 40L, 'TB'),
(1 << 30L, 'GB'),
(1 << 20L, 'MB'),
(1 << 10L, 'KB'),
(1, 'Bytes'))
for limit, suffix in ranges:
if size >= limit:
break
return '%.2f %s' % (float(size)/limit, suffix)
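# Quick self-contained check of pretty_bytes() above (values worked out by hand).
def _demo_pretty_bytes():
    assert pretty_bytes(512) == '512.00 Bytes'
    assert pretty_bytes(1536) == '1.50 KB'           # 1536 / 1024 = 1.5
    assert pretty_bytes(3 * (1 << 30)) == '3.00 GB'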
def fetch_dns(ip="127.0.0.1"):
try:
name = socket.gethostbyaddr(str(ip))[0]
except:
if re.match('^127\.[.0-9]*$', ip):
name = "localhost"
else:
name = ip
return name
#def fetchprocessor(modules=[]):
# return fetcher(importer(modules))
def fetch_lg(name=None):
return logging.getLogger("clearspark")
def setup_logger(name=None,
logfile=None,
console=True,
level=60,
):
log = fetch_lg()
mylevel = 60 - level*20
if mylevel < 10:
log.setLevel(logging.DEBUG)
elif mylevel >= 60:
console = False
log.setLevel(logging.CRITICAL)
else:
log.setLevel(mylevel)
#
if logfile is not None:
        fh = logging.FileHandler(logfile)
fh.setLevel(logging.DEBUG)
fmtf = logging.Formatter(
'%(asctime)s - %(name)s ' +
'%(hostname)-16s %(levelname)-8s ' +
'@%(step)-30s %(message)s')
fh.setFormatter(fmtf)
log.addHandler(fh)
#
if console is True:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
fmtc = logging.Formatter(
'%(asctime)s - %(name)s ' +
'%(hostname)-16s %(levelname)-8s ' +
'@%(step)-30s %(message)s')
ch.setFormatter(fmtc)
log.addHandler(ch)
return log
def parse_ip_output(output):
"""
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
link/ether 00:50:56:8e:01:af brd ff:ff:ff:ff:ff:ff
inet 171.70.42.153/24 brd 171.70.42.255 scope global eth0
inet6 fe80::250:56ff:fe8e:1af/64 scope link
valid_lft forever preferred_lft forever
3: sit0: <NOARP> mtu 1480 qdisc noop
link/sit 0.0.0.0 brd 0.0.0.0
"""
out = {}
cur = None
for l in output:
r = re.match('[0-9]+: ([a-z]+[0-9]*): <.*', l)
if r:
cur = r.group(1)
out[cur] = []
continue
r = re.match(' *inet ([0-9\.]+).*', l)
if r:
if cur is None:
continue
else:
out[cur].append(r.group(1))
continue
r = re.match(' *inet6 ([a-zA-Z0-9\[\]:\.]+).*', l)
if r:
if cur is None:
continue
else:
out[cur].append(r.group(1))
continue
return out
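# Self-contained example for parse_ip_output() above, fed a trimmed version of the
# sample `ip addr` output from the docstring: the result maps interface name to the
# list of IPv4/IPv6 addresses found for it.
def _demo_parse_ip_output():
    sample = [
        "1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue",
        "    inet 127.0.0.1/8 scope host lo",
        "    inet6 ::1/128 scope host",
        "2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000",
        "    inet 171.70.42.153/24 brd 171.70.42.255 scope global eth0",
        "    inet6 fe80::250:56ff:fe8e:1af/64 scope link",
        "3: sit0: <NOARP> mtu 1480 qdisc noop",
    ]
    assert parse_ip_output(sample) == {
        'lo': ['127.0.0.1', '::1'],
        'eth0': ['171.70.42.153', 'fe80::250:56ff:fe8e:1af'],
        'sit0': [],
    }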
def parse_device_number(d):
retval = None
if d[0:2] == '0x':
# a hex nice
d_minor = int(int(d, 16) & 0xff)
d_major = int(int(d, 16) >> 8 & 0xff)
retval = "{},{}".format(d_major, d_minor)
elif re.match('[0-9]+,[0-9]+', d):
retval = d
return retval
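# parse_device_number() above accepts either a hex st_dev value or an already formatted
# "major,minor" string; anything else yields None. A couple of worked examples:
def _demo_parse_device_number():
    assert parse_device_number('0x801') == '8,1'      # major 8, minor 1
    assert parse_device_number('253,0') == '253,0'    # passed through unchanged
    assert parse_device_number('tmpfs') is None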
# sudo awk '!/fuse\./{"stat "$2" -c 0x%D" | getline ss; printf "%s %s %s %s\n"
# , $1, $2, ss, $3}' /proc/mounts 2>/dev/null
def parse_mt_output(output):
_device_to_mount = {}
_device_num_to_list = {}
for l in output:
parsed = shlex.split(l)
device = parsed[0]
mount = parsed[1]
device_number = parsed[2]
fs_type = parsed[3]
_device_to_mount[device] = mount
if fs_type not in ['nfs', 'aufs', 'cifs', 'pnfs', 'smbfs']:
_device_num_to_list[parse_device_number(device_number)] = \
[device, mount]
return {"_device_to_mount": _device_to_mount,
"_device_num_to_list": _device_num_to_list}
|
|
"""
:codeauthor: :email:`Christian McHugh <[email protected]>`
"""
import salt.modules.config as config
import salt.modules.zabbix as zabbix
from salt.exceptions import SaltException
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
CONN_ARGS = {}
CONN_ARGS["url"] = "http://test.url"
CONN_ARGS["auth"] = "1234"
GETID_QUERY_RESULT_OK = [
{"internal": "0", "flags": "0", "groupid": "11", "name": "Databases"}
]
GETID_QUERY_RESULT_BAD = [
{"internal": "0", "flags": "0", "groupid": "11", "name": "Databases"},
{"another": "object"},
]
DEFINED_PARAMS = {
"name": "beta",
"eventsource": 2,
"status": 0,
"filter": {
"evaltype": 2,
"conditions": [{"conditiontype": 24, "operator": 2, "value": "db"}],
},
"operations": [
{"operationtype": 2},
{
"operationtype": 4,
"opgroup": [
{"groupid": {"query_object": "hostgroup", "query_name": "Databases"}}
],
},
],
"empty_list": [],
}
SUBSTITUTED_DEFINED_PARAMS = {
"status": "0",
"filter": {
"evaltype": "2",
"conditions": [{"operator": "2", "conditiontype": "24", "value": "db"}],
},
"eventsource": "2",
"name": "beta",
"operations": [
{"operationtype": "2"},
{"opgroup": [{"groupid": "11"}], "operationtype": "4"},
],
"empty_list": [],
}
EXISTING_OBJECT_PARAMS = {
"status": "0",
"operations": [
{
"operationtype": "2",
"esc_period": "0",
"evaltype": "0",
"opconditions": [],
"esc_step_to": "1",
"actionid": "23",
"esc_step_from": "1",
"operationid": "64",
},
{
"operationtype": "4",
"esc_period": "0",
"evaltype": "0",
"opconditions": [],
"esc_step_to": "1",
"actionid": "23",
"esc_step_from": "1",
"opgroup": [{"groupid": "11", "operationid": "65"}],
"operationid": "65",
},
],
"def_shortdata": "",
"name": "beta",
"esc_period": "0",
"def_longdata": "",
"filter": {
"formula": "",
"evaltype": "2",
"conditions": [
{
"operator": "2",
"conditiontype": "24",
"formulaid": "A",
"value": "DIFFERENT VALUE HERE",
}
],
"eval_formula": "A",
},
"eventsource": "2",
"actionid": "23",
"r_shortdata": "",
"r_longdata": "",
"recovery_msg": "0",
"empty_list": [{"dict_key": "dic_val"}],
}
DIFF_PARAMS_RESULT = {
"filter": {
"evaltype": "2",
"conditions": [{"operator": "2", "conditiontype": "24", "value": "db"}],
},
"empty_list": [],
}
DIFF_PARAMS_RESULT_WITH_ROLLBACK = {
"new": DIFF_PARAMS_RESULT,
"old": {
"filter": {
"formula": "",
"evaltype": "2",
"conditions": [
{
"operator": "2",
"conditiontype": "24",
"formulaid": "A",
"value": "DIFFERENT VALUE HERE",
}
],
"eval_formula": "A",
},
"empty_list": [{"dict_key": "dic_val"}],
},
}
class ZabbixTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.zabbix
"""
def setup_loader_modules(self):
return {
zabbix: {
"__salt__": {
"cmd.which_bin": lambda _: "zabbix_server",
"config.get": config.get,
}
},
config: {"__opts__": {}},
}
def test_get_object_id_by_params(self):
"""
Test get_object_id function with expected result from API call
"""
with patch(
"salt.modules.zabbix.run_query",
MagicMock(return_value=GETID_QUERY_RESULT_OK),
):
self.assertEqual(
zabbix.get_object_id_by_params("hostgroup", "Databases"), "11"
)
def test_get_obj_id_by_params_fail(self):
"""
Test get_object_id function with unexpected result from API call
"""
with patch(
"salt.modules.zabbix.run_query",
MagicMock(return_value=GETID_QUERY_RESULT_BAD),
):
self.assertRaises(
SaltException, zabbix.get_object_id_by_params, "hostgroup", "Databases"
)
def test_substitute_params(self):
"""
Test proper parameter substitution for defined input
"""
with patch(
"salt.modules.zabbix.get_object_id_by_params", MagicMock(return_value="11")
):
self.assertEqual(
zabbix.substitute_params(DEFINED_PARAMS), SUBSTITUTED_DEFINED_PARAMS
)
def test_substitute_params_fail(self):
"""
Test proper parameter substitution if there is needed parameter missing
"""
self.assertRaises(
SaltException,
zabbix.substitute_params,
{"groupid": {"query_object": "hostgroup"}},
)
def test_compare_params(self):
"""
Test result comparison of two params structures
"""
self.assertEqual(
zabbix.compare_params(SUBSTITUTED_DEFINED_PARAMS, EXISTING_OBJECT_PARAMS),
DIFF_PARAMS_RESULT,
)
def test_compare_params_rollback(self):
"""
Test result comparison of two params structures with rollback return value option
"""
self.assertEqual(
zabbix.compare_params(
SUBSTITUTED_DEFINED_PARAMS, EXISTING_OBJECT_PARAMS, True
),
DIFF_PARAMS_RESULT_WITH_ROLLBACK,
)
def test_compare_params_fail(self):
"""
Test result comparison of two params structures where some data type mismatch exists
"""
self.assertRaises(
SaltException, zabbix.compare_params, {"dict": "val"}, {"dict": ["list"]}
)
def test_apiinfo_version(self):
"""
Test apiinfo_version
"""
module_return = "3.4.5"
query_return = {"jsonrpc": "2.0", "result": "3.4.5", "id": 1}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(zabbix.apiinfo_version(**CONN_ARGS), module_return)
def test__login_getting_nested_parameters_from_config(self):
"""
Test get the connection data as nested parameters from config
"""
query_return = {"jsonrpc": "2.0", "result": "3.4.5", "id": 1}
fake_connection_data = {
"zabbix": {
"user": "testuser",
"password": "password",
"url": "http://fake_url/zabbix/api_jsonrpc.php",
}
}
login_return = {
"url": "http://fake_url/zabbix/api_jsonrpc.php",
"auth": "3.4.5",
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.dict(zabbix.__pillar__, fake_connection_data):
self.assertEqual(zabbix._login(), login_return)
def test__login_getting_flat_parameters_from_config(self):
"""
Test get the connection data as flat parameters from config
"""
query_return = {"jsonrpc": "2.0", "result": "3.4.5", "id": 1}
fake_connection_data = {
"zabbix.user": "testuser",
"zabbix.password": "password",
"zabbix.url": "http://fake_url/zabbix/api_jsonrpc.php",
}
login_return = {
"url": "http://fake_url/zabbix/api_jsonrpc.php",
"auth": "3.4.5",
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.dict(zabbix.__pillar__, fake_connection_data):
self.assertEqual(zabbix._login(), login_return)
def test__login_getting_empty_parameters_from_config(self):
"""
Test get the connection data from config with an empty response
"""
query_return = {"jsonrpc": "2.0", "result": "3.4.5", "id": 1}
fake_connection_data = {}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.dict(zabbix.__pillar__, fake_connection_data):
with self.assertRaises(SaltException) as login_exception:
ret = zabbix._login()
self.assertEqual(
login_exception.strerror,
"URL is probably not correct! ('user')",
)
def test_get_mediatype(self):
"""
query_submitted = { "params": {"filter": {"description": 10}, "output": "extend"},
"id": 0, "auth": "251feb98e3c25b6b7fb984b6c7a79817", "method": "mediatype.get"}
"""
module_return = [
{
"mediatypeid": "10",
"type": "0",
"name": "Testing",
"smtp_server": "mail.example.com",
"smtp_helo": "example.com",
"smtp_email": "[email protected]",
}
]
query_return = {
"jsonrpc": "2.0",
"result": [
{
"mediatypeid": "10",
"type": "0",
"name": "Testing",
"smtp_server": "mail.example.com",
"smtp_helo": "example.com",
"smtp_email": "[email protected]",
}
],
"id": 0,
}
zabbix_version_return_list = ["3.4", "4.4.5"]
for zabbix_version_return in zabbix_version_return_list:
patch_apiinfo_version = patch.object(
zabbix,
"apiinfo_version",
autospec=True,
return_value=zabbix_version_return,
)
patch_query = patch.object(
zabbix, "_query", autospec=True, return_value=query_return
)
patch_login = patch.object(
zabbix, "_login", autospec=True, return_value=CONN_ARGS
)
with patch_apiinfo_version, patch_query, patch_login:
self.assertEqual(zabbix.mediatype_get("10", **CONN_ARGS), module_return)
def test_user_create(self):
"""
query_submitted = {"params": {"passwd": "password007", "alias": "james",
"name": "James Bond", "usrgrps": [{"usrgrpid": 7}, {"usrgrpid": 12}]},
"jsonrpc": "2.0", "id": 0, "auth": "f016981c4f0d3f8b9682e34588fe8a33",
"method": "user.create"}
"""
module_return = ["3"]
query_return = {"jsonrpc": "2.0", "result": {"userids": ["3"]}, "id": 0}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.user_create(
"james",
"password007",
"[7, 12]",
firstname="James Bond",
**CONN_ARGS
),
module_return,
)
def test_user_delete(self):
"""
query_submitted = {"params": [3], "jsonrpc": "2.0", "id": 0,
"auth": "68d38eace8e42a35c8d0c6a2ab0245a6", "method": "user.delete"}
"""
module_return = ["3"]
query_return = {"jsonrpc": "2.0", "result": {"userids": ["3"]}, "id": 0}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(zabbix.user_delete(3, **CONN_ARGS), module_return)
def test_user_exists(self):
"""
query_submitted = {"params": {"filter": {"alias": "Admin"}, "output": "extend"},
"jsonrpc": "2.0", "id": 0, "auth": "72435c7f754cb2adb4ecddc98216057f",
"method": "user.get"}
"""
module_return = True
# pylint: disable=E8128
query_return = {
"jsonrpc": "2.0",
"result": [
{
"userid": "1",
"alias": "Admin",
"name": "Zabbix",
"surname": "Administrator",
"url": "",
"autologin": "1",
"autologout": "0",
"lang": "en_GB",
"refresh": "30s",
"type": "3",
"theme": "default",
"attempt_failed": "0",
"attempt_ip": "10.0.2.2",
"attempt_clock": "1515922072",
"rows_per_page": "50",
}
],
"id": 0,
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.user_exists("Admin", **CONN_ARGS), module_return
)
def test_user_get(self):
"""
query_submitted = {"params": {"filter": {"alias": "Admin"}, "output": "extend"},
"jsonrpc": "2.0", "id": 0, "auth": "49ef327f205d9e9150d4651cb6adc2d5",
"method": "user.get"}
"""
module_return = [
{
"lang": "en_GB",
"rows_per_page": "50",
"surname": "Administrator",
"name": "Zabbix",
"url": "",
"attempt_clock": "1515922072",
"userid": "1",
"autologin": "1",
"refresh": "30s",
"attempt_failed": "0",
"alias": "Admin",
"theme": "default",
"autologout": "0",
"attempt_ip": "10.0.2.2",
"type": "3",
}
]
# pylint: disable=E8128
query_return = {
"jsonrpc": "2.0",
"result": [
{
"userid": "1",
"alias": "Admin",
"name": "Zabbix",
"surname": "Administrator",
"url": "",
"autologin": "1",
"autologout": "0",
"lang": "en_GB",
"refresh": "30s",
"type": "3",
"theme": "default",
"attempt_failed": "0",
"attempt_ip": "10.0.2.2",
"attempt_clock": "1515922072",
"rows_per_page": "50",
}
],
"id": 0,
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(zabbix.user_get("Admin", **CONN_ARGS), module_return)
self.assertEqual(
zabbix.user_get(userids="1", **CONN_ARGS), module_return
)
def test_user_update(self):
"""
query_submitted = {"params": {"userid": 3, "name": "James Brown"}, "jsonrpc": "2.0",
"id": 0, "auth": "cdf2ee35e3bc47560585e9c457cbc398", "method": "user.update"}
"""
module_return = ["3"]
query_return = {"jsonrpc": "2.0", "result": {"userids": ["3"]}, "id": 0}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.user_update("3", visible_name="James Brown", **CONN_ARGS),
module_return,
)
def test_user_getmedia(self):
"""
query_submitted = {"params": {"userids": 3}, "jsonrpc": "2.0", "id": 0,
"auth": "d4de741ea7cdd434b3ba7b56efa4efaf", "method": "usermedia.get"}
"""
module_return = [
{
"mediatypeid": "1",
"mediaid": "1",
"severity": "63",
"userid": "3",
"period": "1-7,00:00-24:00",
"sendto": "[email protected]",
"active": "0",
}
]
# pylint: disable=E8128
query_return = {
"jsonrpc": "2.0",
"result": [
{
"mediaid": "1",
"userid": "3",
"mediatypeid": "1",
"sendto": "[email protected]",
"active": "0",
"severity": "63",
"period": "1-7,00:00-24:00",
}
],
"id": 0,
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(zabbix.user_getmedia("3", **CONN_ARGS), module_return)
def test_user_addmedia(self):
"""
query_submitted = {"params": {"medias": [{"active": 0, "mediatypeid": 1,
"period": "1-7,00:00-24:00", "severity": 63, "sendto": "[email protected]"}],
"users": [{"userid": 1}]}, "jsonrpc": "2.0", "id": 0, "auth": "b347fc1bf1f5617b93755619a037c19e",
"method": "user.addmedia"}
"""
module_return = ["2"]
query_return = {"jsonrpc": "2.0", "result": {"mediaids": ["2"]}, "id": 0}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.user_addmedia(
"1",
active="0",
mediatypeid="1",
period="1-7,00:00-24:00",
sendto="[email protected]",
severity="63",
**CONN_ARGS
),
module_return,
)
def test_user_deletemedia(self):
"""
query_submitted = {"params": [1], "jsonrpc": "2.0", "id": 0, "auth": "9fb226c759a320de0de3b7a141404506",
"method": "user.deletemedia"}
"""
module_return = [1]
query_return = {"jsonrpc": "2.0", "result": {"mediaids": [1]}, "id": 0}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.user_deletemedia("1", **CONN_ARGS), module_return
)
def test_user_list(self):
"""
query_submitted = {"params": {"output": "extend"}, "jsonrpc": "2.0", "id": 0,
"auth": "54d67b63c37e690cf06972678f1e9720", "method": "user.get"}
"""
module_return = [
{
"lang": "en_GB",
"rows_per_page": "50",
"surname": "Administrator",
"name": "Zabbix",
"url": "",
"attempt_clock": "1515922072",
"userid": "1",
"autologin": "1",
"refresh": "30s",
"attempt_failed": "0",
"alias": "Admin",
"theme": "default",
"autologout": "0",
"attempt_ip": "10.0.2.2",
"type": "3",
},
{
"lang": "en_GB",
"rows_per_page": "50",
"surname": "",
"name": "",
"url": "",
"attempt_clock": "0",
"userid": "2",
"autologin": "0",
"refresh": "30s",
"attempt_failed": "0",
"alias": "guest",
"theme": "default",
"autologout": "15m",
"attempt_ip": "",
"type": "1",
},
{
"lang": "en_GB",
"rows_per_page": "50",
"surname": "",
"name": "James Brown",
"url": "",
"attempt_clock": "0",
"userid": "5",
"autologin": "0",
"refresh": "30s",
"attempt_failed": "0",
"alias": "james",
"theme": "default",
"autologout": "15m",
"attempt_ip": "",
"type": "1",
},
]
# pylint: disable=E8128
query_return = {
"jsonrpc": "2.0",
"result": [
{
"userid": "1",
"alias": "Admin",
"name": "Zabbix",
"surname": "Administrator",
"url": "",
"autologin": "1",
"autologout": "0",
"lang": "en_GB",
"refresh": "30s",
"type": "3",
"theme": "default",
"attempt_failed": "0",
"attempt_ip": "10.0.2.2",
"attempt_clock": "1515922072",
"rows_per_page": "50",
},
{
"userid": "2",
"alias": "guest",
"name": "",
"surname": "",
"url": "",
"autologin": "0",
"autologout": "15m",
"lang": "en_GB",
"refresh": "30s",
"type": "1",
"theme": "default",
"attempt_failed": "0",
"attempt_ip": "",
"attempt_clock": "0",
"rows_per_page": "50",
},
{
"userid": "5",
"alias": "james",
"name": "James Brown",
"surname": "",
"url": "",
"autologin": "0",
"autologout": "15m",
"lang": "en_GB",
"refresh": "30s",
"type": "1",
"theme": "default",
"attempt_failed": "0",
"attempt_ip": "",
"attempt_clock": "0",
"rows_per_page": "50",
},
],
"id": 0,
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(zabbix.user_list(**CONN_ARGS), module_return)
def test_usergroup_create(self):
"""
query_submitted = {"params": {"name": "testgroup"}, "jsonrpc": "2.0", "id": 0,
"auth": "7f3ac5e90201e5de4eb19e5322606575", "method": "usergroup.create"}
"""
module_return = ["13"]
query_return = {"jsonrpc": "2.0", "result": {"usrgrpids": ["13"]}, "id": 0}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.usergroup_create("testgroup", **CONN_ARGS), module_return
)
def test_usergroup_delete(self):
"""
query_submitted = {"params": [13], "jsonrpc": "2.0", "id": 0,
"auth": "9bad39de2a5a9211da588dd06dad8773", "method": "usergroup.delete"}
"""
module_return = ["13"]
query_return = {"jsonrpc": "2.0", "result": {"usrgrpids": ["13"]}, "id": 0}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.usergroup_delete("13", **CONN_ARGS), module_return
)
def test_usergroup_exists(self):
"""
query_submitted = {"params": {"filter": {"name": "testgroup"}, "output": "extend",
"selectRights": "extend"}, "jsonrpc": "2.0", "id": 0, "auth": "e62424cd7aa71f6748e1d69c190ac852",
"method": "usergroup.get"}
"""
module_return = True
query_return = {
"jsonrpc": "2.0",
"result": [
{
"usrgrpid": "13",
"name": "testgroup",
"gui_access": "0",
"users_status": "0",
"debug_mode": "0",
"rights": [],
}
],
"id": 0,
}
with patch.object(zabbix, "apiinfo_version", return_value="3.2"):
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.usergroup_exists("testgroup", **CONN_ARGS), module_return
)
def test_usergroup_get(self):
"""
query_submitted = {"params": {"filter": {"name": "testgroup"}, "output": "extend",
"selectRights": "extend"}, "jsonrpc": "2.0", "id": 0, "auth": "739cf358050f2a2d33162fdcfa714a3c",
"method": "usergroup.get"}
"""
module_return = [
{
"name": "testgroup",
"rights": [],
"users_status": "0",
"gui_access": "0",
"debug_mode": "0",
"usrgrpid": "13",
}
]
query_return = {
"jsonrpc": "2.0",
"result": [
{
"usrgrpid": "13",
"name": "testgroup",
"gui_access": "0",
"users_status": "0",
"debug_mode": "0",
"rights": [],
}
],
"id": 0,
}
with patch.object(zabbix, "apiinfo_version", return_value="3.2"):
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.usergroup_get("testgroup", **CONN_ARGS), module_return
)
def test_usergroup_update(self):
"""
query_submitted = {"params": {"usrgrpid": 13, "users_status": 1}, "jsonrpc": "2.0",
"id": 0, "auth": "ef772237245f59f655871bc8fbbcd67c", "method": "usergroup.update"}
"""
module_return = ["13"]
query_return = {"jsonrpc": "2.0", "result": {"usrgrpids": ["13"]}, "id": 0}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.usergroup_update("13", users_status="1", **CONN_ARGS),
module_return,
)
def test_usergroup_list(self):
"""
query_submitted = {"params": {"output": "extend"}, "jsonrpc": "2.0", "id": 0,
"auth": "4bc366bc7803c07e80f15b1bc14dc61f", "method": "usergroup.get"}
"""
module_return = [
{
"usrgrpid": "7",
"gui_access": "0",
"debug_mode": "0",
"name": "Zabbix administrators",
"users_status": "0",
},
{
"usrgrpid": "8",
"gui_access": "0",
"debug_mode": "0",
"name": "Guests",
"users_status": "0",
},
{
"usrgrpid": "9",
"gui_access": "0",
"debug_mode": "0",
"name": "Disabled",
"users_status": "1",
},
{
"usrgrpid": "11",
"gui_access": "0",
"debug_mode": "1",
"name": "Enabled debug mode",
"users_status": "0",
},
{
"usrgrpid": "12",
"gui_access": "2",
"debug_mode": "0",
"name": "No access to the frontend",
"users_status": "0",
},
{
"usrgrpid": "13",
"gui_access": "0",
"debug_mode": "0",
"name": "testgroup",
"users_status": "0",
},
]
# pylint: disable=E8128
query_return = {
"jsonrpc": "2.0",
"result": [
{
"usrgrpid": "7",
"name": "Zabbix administrators",
"gui_access": "0",
"users_status": "0",
"debug_mode": "0",
},
{
"usrgrpid": "8",
"name": "Guests",
"gui_access": "0",
"users_status": "0",
"debug_mode": "0",
},
{
"usrgrpid": "9",
"name": "Disabled",
"gui_access": "0",
"users_status": "1",
"debug_mode": "0",
},
{
"usrgrpid": "11",
"name": "Enabled debug mode",
"gui_access": "0",
"users_status": "0",
"debug_mode": "1",
},
{
"usrgrpid": "12",
"name": "No access to the frontend",
"gui_access": "2",
"users_status": "0",
"debug_mode": "0",
},
{
"usrgrpid": "13",
"name": "testgroup",
"gui_access": "0",
"users_status": "0",
"debug_mode": "0",
},
],
"id": 0,
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(zabbix.usergroup_list(**CONN_ARGS), module_return)
def test_host_inventory_get(self):
"""
test host_inventory_get
"""
module_return = {
"poc_2_email": "",
"poc_2_phone_b": "",
"site_country": "",
"poc_2_screen": "",
"poc_2_notes": "",
"poc_1_screen": "",
"hardware": "",
"software_app_a": "",
"software_app_b": "",
"software_app_c": "",
"software_app_d": "",
"os_short": "",
"site_zip": "",
"poc_2_name": "",
"os_full": "",
"host_netmask": "",
"host_router": "",
"url_c": "",
"date_hw_install": "",
"poc_1_phone_b": "",
"poc_1_phone_a": "",
"poc_1_cell": "",
"type_full": "",
"location_lat": "",
"vendor": "",
"contact": "",
"site_rack": "",
"location": "",
"poc_2_cell": "",
"date_hw_expiry": "",
"installer_name": "",
"type": "",
"contract_number": "",
"deployment_status": "",
"site_notes": "",
"inventory_mode": "0",
"oob_ip": "",
"host_networks": "",
"hardware_full": "",
"poc_2_phone_a": "",
"poc_1_name": "",
"site_state": "",
"chassis": "",
"software_app_e": "",
"site_address_b": "",
"site_address_a": "",
"date_hw_decomm": "",
"date_hw_purchase": "",
"location_lon": "",
"hw_arch": "",
"software_full": "",
"asset_tag": "",
"oob_router": "",
"hostid": "10258",
"poc_1_email": "",
"name": "",
"poc_1_notes": "",
"serialno_b": "",
"notes": "",
"oob_netmask": "",
"alias": "other thing",
"tag": "",
"macaddress_b": "",
"macaddress_a": "",
"site_city": "",
"site_address_c": "",
"model": "",
"serialno_a": "",
"os": "some",
"url_b": "",
"url_a": "",
"software": "",
}
query_return = {
"jsonrpc": "2.0",
"result": [
{
"hostid": "10258",
"proxy_hostid": "0",
"host": "master",
"status": "0",
"disable_until": "1517766661",
"error": (
"Get value from agent failed: cannot connect to"
" [[10.0.2.15]:10050]: [111] Connection refused"
),
"available": "2",
"errors_from": "1516087871",
"lastaccess": "0",
"ipmi_authtype": "-1",
"ipmi_privilege": "2",
"ipmi_username": "",
"ipmi_password": "",
"ipmi_disable_until": "0",
"ipmi_available": "0",
"snmp_disable_until": "0",
"snmp_available": "0",
"maintenanceid": "0",
"maintenance_status": "0",
"maintenance_type": "0",
"maintenance_from": "0",
"ipmi_errors_from": "0",
"snmp_errors_from": "0",
"ipmi_error": "",
"snmp_error": "",
"jmx_disable_until": "0",
"jmx_available": "0",
"jmx_errors_from": "0",
"jmx_error": "",
"name": "master",
"flags": "0",
"templateid": "0",
"description": "",
"tls_connect": "1",
"tls_accept": "1",
"tls_issuer": "",
"tls_subject": "",
"tls_psk_identity": "",
"tls_psk": "",
"inventory": {
"hostid": "10258",
"inventory_mode": "0",
"type": "",
"type_full": "",
"name": "",
"alias": "other thing",
"os": "some",
"os_full": "",
"os_short": "",
"serialno_a": "",
"serialno_b": "",
"tag": "",
"asset_tag": "",
"macaddress_a": "",
"macaddress_b": "",
"hardware": "",
"hardware_full": "",
"software": "",
"software_full": "",
"software_app_a": "",
"software_app_b": "",
"software_app_c": "",
"software_app_d": "",
"software_app_e": "",
"contact": "",
"location": "",
"location_lat": "",
"location_lon": "",
"notes": "",
"chassis": "",
"model": "",
"hw_arch": "",
"vendor": "",
"contract_number": "",
"installer_name": "",
"deployment_status": "",
"url_a": "",
"url_b": "",
"url_c": "",
"host_networks": "",
"host_netmask": "",
"host_router": "",
"oob_ip": "",
"oob_netmask": "",
"oob_router": "",
"date_hw_purchase": "",
"date_hw_install": "",
"date_hw_expiry": "",
"date_hw_decomm": "",
"site_address_a": "",
"site_address_b": "",
"site_address_c": "",
"site_city": "",
"site_state": "",
"site_country": "",
"site_zip": "",
"site_rack": "",
"site_notes": "",
"poc_1_name": "",
"poc_1_email": "",
"poc_1_phone_a": "",
"poc_1_phone_b": "",
"poc_1_cell": "",
"poc_1_screen": "",
"poc_1_notes": "",
"poc_2_name": "",
"poc_2_email": "",
"poc_2_phone_a": "",
"poc_2_phone_b": "",
"poc_2_cell": "",
"poc_2_screen": "",
"poc_2_notes": "",
},
}
],
"id": 1,
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.host_inventory_get("12345", **CONN_ARGS), module_return
)
def test_host_inventory_get_with_disabled_inventory(self):
"""
test host_inventory_get with a host with inventory disabled
"""
module_return = False
query_return = {
"jsonrpc": "2.0",
"result": [
{
"hostid": "10258",
"proxy_hostid": "0",
"host": "master",
"status": "0",
"disable_until": "1517766661",
"error": "Get value from agent failed: cannot connect to [[10.0.2.15]:10050]: [111] Connection refused",
"available": "2",
"errors_from": "1516087871",
"lastaccess": "0",
"ipmi_authtype": "-1",
"ipmi_privilege": "2",
"ipmi_username": "",
"ipmi_password": "",
"ipmi_disable_until": "0",
"ipmi_available": "0",
"snmp_disable_until": "0",
"snmp_available": "0",
"maintenanceid": "0",
"maintenance_status": "0",
"maintenance_type": "0",
"maintenance_from": "0",
"ipmi_errors_from": "0",
"snmp_errors_from": "0",
"ipmi_error": "",
"snmp_error": "",
"jmx_disable_until": "0",
"jmx_available": "0",
"jmx_errors_from": "0",
"jmx_error": "",
"name": "master",
"flags": "0",
"templateid": "0",
"description": "",
"tls_connect": "1",
"tls_accept": "1",
"tls_issuer": "",
"tls_subject": "",
"tls_psk_identity": "",
"tls_psk": "",
"inventory": [],
}
],
"id": 1,
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.host_inventory_get("12345", **CONN_ARGS), module_return
)
def test_host_inventory_get_with_a_missing_host(self):
"""
test host_inventory_get with a non-existent host
"""
module_return = False
query_return = {
"jsonrpc": "2.0",
"result": [],
"id": 0,
}
with patch.object(zabbix, "_query", return_value=query_return):
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.host_inventory_get("12345", **CONN_ARGS), module_return
)
def test_host_inventory_set(self):
"""
query_submitted = {"params": {"hostid": 10258, "inventory_mode": "0", "inventory":
{"asset_tag": "jml3322", "type": "Xen"}}, "jsonrpc": "2.0", "id": 0,
"auth": "a50d2c3030b9b73d7c28b5ebd89c044c", "method": "host.update"}
"""
module_return = {"hostids": [10258]}
query_return = {"jsonrpc": "2.0", "result": {"hostids": [10258]}, "id": 0}
with patch.object(
zabbix, "_query", autospec=True, return_value=query_return
) as mock__query:
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.host_inventory_set(
10258, asset_tag="jml3322", type="Xen", **CONN_ARGS
),
module_return,
)
mock__query.assert_called_with(
"host.update",
{
"hostid": 10258,
"inventory_mode": "0",
"inventory": {
"asset_tag": "jml3322",
"type": "Xen",
"url": "http://test.url",
"auth": "1234",
},
},
"http://test.url",
"1234",
)
def test_host_inventory_set_with_inventory_mode(self):
"""
query_submitted = {"params": {"hostid": 10258, "inventory_mode": "1", "inventory":
{"asset_tag": "jml3322", "type": "Xen"}}, "jsonrpc": "2.0", "id": 0,
"auth": "a50d2c3030b9b73d7c28b5ebd89c044c", "method": "host.update"}
"""
module_return = {"hostids": [10258]}
query_return = {"jsonrpc": "2.0", "result": {"hostids": [10258]}, "id": 0}
with patch.object(
zabbix, "_query", autospec=True, return_value=query_return
) as mock__query:
with patch.object(zabbix, "_login", return_value=CONN_ARGS):
self.assertEqual(
zabbix.host_inventory_set(
10258,
asset_tag="jml3322",
type="Xen",
inventory_mode="1",
**CONN_ARGS
),
module_return,
)
mock__query.assert_called_with(
"host.update",
{
"hostid": 10258,
"inventory_mode": "1",
"inventory": {
"asset_tag": "jml3322",
"type": "Xen",
"url": "http://test.url",
"auth": "1234",
},
},
"http://test.url",
"1234",
)
|
|
# oracle/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Oracle database.
Oracle versions 8 through current (11g at the time of this writing) are supported.
For information on connecting via specific drivers, see the documentation
for that driver.
Connect Arguments
-----------------
The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which
affect the behavior of the dialect regardless of driver in use.
* *use_ansi* - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults
to ``True``. If ``False``, Oracle-8 compatible constructs are used for joins.
* *optimize_limits* - defaults to ``False``. See the section on LIMIT/OFFSET.
* *use_binds_for_limits* - defaults to ``True``. See the section on LIMIT/OFFSET.
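For example (the connection URL below is illustrative only), these flags are passed
directly to :func:`~sqlalchemy.create_engine`::
    from sqlalchemy import create_engine
    engine = create_engine('oracle://scott:tiger@dsn',
                           use_ansi=True,
                           optimize_limits=False,
                           use_binds_for_limits=True)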
Auto Increment Behavior
-----------------------
SQLAlchemy Table objects which include integer primary keys are usually assumed to have
"autoincrementing" behavior, meaning they can generate their own primary key values upon
INSERT. Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences
to produce these values. With the Oracle dialect, *a sequence must always be explicitly
specified to enable autoincrement*. This diverges from the majority of documentation
examples, which assume the usage of an autoincrement-capable database. To specify sequences,
use the sqlalchemy.schema.Sequence object which is passed to a Column construct::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
Column(...), ...
)
This step is also required when using table reflection, i.e. autoload=True::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
autoload=True
)
Identifier Casing
-----------------
In Oracle, the data dictionary represents all case insensitive identifier names
using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier
name to be case insensitive. The Oracle dialect converts all case insensitive identifiers
to and from those two formats during schema level communication, such as reflection of
tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a
case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches
against data dictionary data received from Oracle, so unless identifier names have been
truly created as case sensitive (i.e. using quoted names), all lowercase names should be
used on the SQLAlchemy side.
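As an illustration (table names are hypothetical), reflecting a table that was created
without quoting on the Oracle side should use a lowercase name::
    t1 = Table('my_table', metadata, autoload=True)   # matches MY_TABLE in the data dictionary
    t2 = Table('MY_TABLE', metadata, autoload=True)   # treated as case sensitive and quoted;
                                                       # only matches a quoted "MY_TABLE"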
Unicode
-------
SQLAlchemy 0.6 uses the "native unicode" mode provided as of cx_oracle 5. cx_oracle 5.0.2
or greater is recommended for support of NCLOB. If not using cx_oracle 5, the NLS_LANG
environment variable needs to be set in order for the oracle client library to use
proper encoding, such as "AMERICAN_AMERICA.UTF8".
Also note that Oracle supports unicode data through the NVARCHAR and NCLOB data types.
When using the SQLAlchemy Unicode and UnicodeText types, these DDL types will be used
within CREATE TABLE statements. Usage of VARCHAR2 and CLOB with unicode text still
requires NLS_LANG to be set.
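As a sketch (table and column names are illustrative), a table defined with the generic
unicode types::
    t = Table('unicode_data', metadata,
              Column('id', Integer, Sequence('unicode_data_seq'), primary_key=True),
              Column('title', Unicode(100)),
              Column('body', UnicodeText())
              )
renders NVARCHAR2(100) and NCLOB in its CREATE TABLE statement (VARCHAR2/CLOB on
Oracle 8 - see the Oracle 8 Compatibility section below).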
LIMIT/OFFSET Support
--------------------
Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses
a wrapped subquery approach in conjunction with ROWNUM. The exact methodology
is taken from
http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html .
There are two options which affect its behavior:
* the "FIRST ROWS()" optimization keyword is not used by default. To enable the usage of this
optimization directive, specify ``optimize_limits=True`` to :func:`.create_engine`.
* the values passed for the limit/offset are sent as bound parameters. Some users have observed
that Oracle produces a poor query plan when the values are sent as binds and not
rendered literally. To render the limit/offset values literally within the SQL
statement, specify ``use_binds_for_limits=False`` to :func:`.create_engine`.
Some users have reported better performance when the entirely different approach of a
window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note
that the majority of users don't observe this). To suit this case the
method used for LIMIT/OFFSET can be replaced entirely. See the recipe at
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
which installs a select compiler that overrides the generation of limit/offset with
a window function.
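As a rough sketch of the generated SQL shape, a query such as::
    select([sometable]).limit(10).offset(5)
is rendered as nested subqueries filtered on ROWNUM, along the lines of
"SELECT ... FROM (SELECT ..., ROWNUM AS ora_rn FROM (...) WHERE ROWNUM <= :limit + :offset)
WHERE ora_rn > :offset", rather than a LIMIT/OFFSET clause.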
ON UPDATE CASCADE
-----------------
Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based solution
is available at http://asktom.oracle.com/tkyte/update_cascade/index.html .
When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
cascading updates - specify ForeignKey objects using the
"deferrable=True, initially='deferred'" keyword arguments,
and specify "passive_updates=False" on each relationship().
Oracle 8 Compatibility
----------------------
When Oracle 8 is detected, the dialect internally configures itself to the following
behaviors:
* the use_ansi flag is set to False. This has the effect of converting all
JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
makes use of Oracle's (+) operator.
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued
instead. This is because these types don't seem to work correctly on Oracle 8
even though they are available. The :class:`~sqlalchemy.types.NVARCHAR`
and :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate NVARCHAR2 and NCLOB.
* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
encodes all Python unicode objects to "string" before passing in as bind parameters.
Synonym/DBLINK Reflection
-------------------------
When using reflection with Table objects, the dialect can optionally search for tables
indicated by synonyms that reference DBLINK-ed tables by passing the flag
oracle_resolve_synonyms=True as a keyword argument to the Table construct. If DBLINK
is not in use this flag should be left off.
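For example (the synonym name is illustrative)::
    orders = Table('orders_synonym', metadata,
                   autoload=True,
                   oracle_resolve_synonyms=True)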
"""
import random, re
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, log
from sqlalchemy.engine import default, base, reflection
from sqlalchemy.sql import compiler, visitors, expression
from sqlalchemy.sql import operators as sql_operators, functions as sql_functions
from sqlalchemy import types as sqltypes
from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, \
BLOB, CLOB, TIMESTAMP, FLOAT
RESERVED_WORDS = \
set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '\
'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '\
'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE '\
'ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE '\
'BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES '\
'AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS '\
'NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER '\
'CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR '\
'DECIMAL UNION PUBLIC AND START UID COMMENT'.split())
NO_ARG_FNS = set('UID CURRENT_DATE SYSDATE USER '
'CURRENT_TIME CURRENT_TIMESTAMP'.split())
class RAW(sqltypes.LargeBinary):
pass
OracleRaw = RAW
class NCLOB(sqltypes.Text):
__visit_name__ = 'NCLOB'
VARCHAR2 = VARCHAR
NVARCHAR2 = NVARCHAR
class NUMBER(sqltypes.Numeric, sqltypes.Integer):
__visit_name__ = 'NUMBER'
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
super(NUMBER, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
def adapt(self, impltype):
ret = super(NUMBER, self).adapt(impltype)
# leave a hint for the DBAPI handler
ret._is_oracle_number = True
return ret
@property
def _type_affinity(self):
if bool(self.scale and self.scale > 0):
return sqltypes.Numeric
else:
return sqltypes.Integer
class DOUBLE_PRECISION(sqltypes.Numeric):
__visit_name__ = 'DOUBLE_PRECISION'
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = False
super(DOUBLE_PRECISION, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
class BFILE(sqltypes.LargeBinary):
__visit_name__ = 'BFILE'
class LONG(sqltypes.Text):
__visit_name__ = 'LONG'
class INTERVAL(sqltypes.TypeEngine):
__visit_name__ = 'INTERVAL'
def __init__(self,
day_precision=None,
second_precision=None):
"""Construct an INTERVAL.
Note that only DAY TO SECOND intervals are currently supported.
This is due to a lack of support for YEAR TO MONTH intervals
within available DBAPIs (cx_oracle and zxjdbc).
:param day_precision: the day precision value. this is the number of digits
to store for the day field. Defaults to "2"
:param second_precision: the second precision value. this is the number of digits
to store for the fractional seconds field. Defaults to "6".
"""
self.day_precision = day_precision
self.second_precision = second_precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(day_precision=interval.day_precision,
second_precision=interval.second_precision)
@property
def _type_affinity(self):
return sqltypes.Interval
class ROWID(sqltypes.TypeEngine):
"""Oracle ROWID type.
When used in a cast() or similar, generates ROWID.
"""
__visit_name__ = 'ROWID'
class _OracleBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
colspecs = {
sqltypes.Boolean : _OracleBoolean,
sqltypes.Interval : INTERVAL,
}
ischema_names = {
'VARCHAR2' : VARCHAR,
'NVARCHAR2' : NVARCHAR,
'CHAR' : CHAR,
'DATE' : DATE,
'NUMBER' : NUMBER,
'BLOB' : BLOB,
'BFILE' : BFILE,
'CLOB' : CLOB,
'NCLOB' : NCLOB,
'TIMESTAMP' : TIMESTAMP,
'TIMESTAMP WITH TIME ZONE' : TIMESTAMP,
'INTERVAL DAY TO SECOND' : INTERVAL,
'RAW' : RAW,
'FLOAT' : FLOAT,
'DOUBLE PRECISION' : DOUBLE_PRECISION,
'LONG' : LONG,
}
class OracleTypeCompiler(compiler.GenericTypeCompiler):
# Note:
# Oracle DATE == DATETIME
# Oracle does not allow milliseconds in DATE
# Oracle does not support TIME columns
def visit_datetime(self, type_):
return self.visit_DATE(type_)
def visit_float(self, type_):
return self.visit_FLOAT(type_)
def visit_unicode(self, type_):
if self.dialect._supports_nchar:
return self.visit_NVARCHAR(type_)
else:
return self.visit_VARCHAR(type_)
def visit_INTERVAL(self, type_):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None and
"(%d)" % type_.day_precision or
"",
type_.second_precision is not None and
"(%d)" % type_.second_precision or
"",
)
def visit_TIMESTAMP(self, type_):
if type_.timezone:
return "TIMESTAMP WITH TIME ZONE"
else:
return "TIMESTAMP"
def visit_DOUBLE_PRECISION(self, type_):
return self._generate_numeric(type_, "DOUBLE PRECISION")
def visit_NUMBER(self, type_, **kw):
return self._generate_numeric(type_, "NUMBER", **kw)
def _generate_numeric(self, type_, name, precision=None, scale=None):
if precision is None:
precision = type_.precision
if scale is None:
scale = getattr(type_, 'scale', None)
if precision is None:
return name
elif scale is None:
return "%(name)s(%(precision)s)" % {'name':name,'precision': precision}
else:
return "%(name)s(%(precision)s, %(scale)s)" % {'name':name,'precision': precision, 'scale' : scale}
def visit_VARCHAR(self, type_):
if self.dialect._supports_char_length:
return "VARCHAR(%(length)s CHAR)" % {'length' : type_.length}
else:
return "VARCHAR(%(length)s)" % {'length' : type_.length}
def visit_NVARCHAR(self, type_):
return "NVARCHAR2(%(length)s)" % {'length' : type_.length}
def visit_text(self, type_):
return self.visit_CLOB(type_)
def visit_unicode_text(self, type_):
if self.dialect._supports_nchar:
return self.visit_NCLOB(type_)
else:
return self.visit_CLOB(type_)
def visit_large_binary(self, type_):
return self.visit_BLOB(type_)
def visit_big_integer(self, type_):
return self.visit_NUMBER(type_, precision=19)
def visit_boolean(self, type_):
return self.visit_SMALLINT(type_)
def visit_RAW(self, type_):
return "RAW(%(length)s)" % {'length' : type_.length}
def visit_ROWID(self, type_):
return "ROWID"
class OracleCompiler(compiler.SQLCompiler):
"""Oracle compiler modifies the lexical structure of Select
statements to work under non-ANSI configured Oracle databases, if
the use_ansi flag is False.
"""
compound_keywords = util.update_copy(
compiler.SQLCompiler.compound_keywords,
{
expression.CompoundSelect.EXCEPT : 'MINUS'
}
)
def __init__(self, *args, **kwargs):
self.__wheres = {}
self._quoted_bind_names = {}
super(OracleCompiler, self).__init__(*args, **kwargs)
def visit_mod(self, binary, **kw):
return "mod(%s, %s)" % (self.process(binary.left), self.process(binary.right))
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_char_length_func(self, fn, **kw):
return "LENGTH" + self.function_argspec(fn, **kw)
def visit_match_op(self, binary, **kw):
return "CONTAINS (%s, %s)" % (self.process(binary.left), self.process(binary.right))
def get_select_hint_text(self, byfroms):
return " ".join(
"/*+ %s */" % text for table, text in byfroms.items()
)
def function_argspec(self, fn, **kw):
if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS:
return compiler.SQLCompiler.function_argspec(self, fn, **kw)
else:
return ""
def default_from(self):
"""Called when a ``SELECT`` statement has no froms,
and no ``FROM`` clause is to be appended.
The Oracle compiler tacks a "FROM DUAL" to the statement.
"""
return " FROM DUAL"
def visit_join(self, join, **kwargs):
if self.dialect.use_ansi:
return compiler.SQLCompiler.visit_join(self, join, **kwargs)
else:
kwargs['asfrom'] = True
return self.process(join.left, **kwargs) + \
", " + self.process(join.right, **kwargs)
def _get_nonansi_join_whereclause(self, froms):
clauses = []
def visit_join(join):
if join.isouter:
def visit_binary(binary):
if binary.operator == sql_operators.eq:
if binary.left.table is join.right:
binary.left = _OuterJoinColumn(binary.left)
elif binary.right.table is join.right:
binary.right = _OuterJoinColumn(binary.right)
clauses.append(visitors.cloned_traverse(join.onclause, {},
{'binary':visit_binary}))
else:
clauses.append(join.onclause)
for j in join.left, join.right:
if isinstance(j, expression.Join):
visit_join(j)
for f in froms:
if isinstance(f, expression.Join):
visit_join(f)
if not clauses:
return None
else:
return sql.and_(*clauses)
def visit_outer_join_column(self, vc):
return self.process(vc.column) + "(+)"
def visit_sequence(self, seq):
return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval"
def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs):
"""Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??"""
if asfrom or ashint:
alias_name = isinstance(alias.name, expression._generated_label) and \
self._truncated_identifier("alias", alias.name) or alias.name
if ashint:
return alias_name
elif asfrom:
return self.process(alias.original, asfrom=asfrom, **kwargs) + \
" " + self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
def returning_clause(self, stmt, returning_cols):
def create_out_param(col, i):
bindparam = sql.outparam("ret_%d" % i, type_=col.type)
self.binds[bindparam.key] = bindparam
return self.bindparam_string(self._truncate_bindparam(bindparam))
columnlist = list(expression._select_iterables(returning_cols))
# within_columns_clause =False so that labels (foo AS bar) don't render
columns = [self.process(c, within_columns_clause=False, result_map=self.result_map) for c in columnlist]
binds = [create_out_param(c, i) for i, c in enumerate(columnlist)]
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
def _TODO_visit_compound_select(self, select):
"""Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle."""
pass
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``rownum`` criterion.
"""
if not getattr(select, '_oracle_visit', None):
if not self.dialect.use_ansi:
if self.stack and 'from' in self.stack[-1]:
existingfroms = self.stack[-1]['from']
else:
existingfroms = None
froms = select._get_display_froms(existingfroms)
whereclause = self._get_nonansi_join_whereclause(froms)
if whereclause is not None:
select = select.where(whereclause)
select._oracle_visit = True
if select._limit is not None or select._offset is not None:
# See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html
#
# Generalized form of an Oracle pagination query:
# select ... from (
# select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from (
# select distinct ... where ... order by ...
# ) where ROWNUM <= :limit+:offset
# ) where ora_rn > :offset
# Outer select and "ROWNUM as ora_rn" can be dropped if limit=0
# TODO: use annotations instead of clone + attr set ?
select = select._generate()
select._oracle_visit = True
# Wrap the middle select and add the hint
limitselect = sql.select([c for c in select.c])
if select._limit and self.dialect.optimize_limits:
limitselect = limitselect.prefix_with("/*+ FIRST_ROWS(%d) */" % select._limit)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
# If needed, add the limiting clause
if select._limit is not None:
max_row = select._limit
if select._offset is not None:
max_row += select._offset
if not self.dialect.use_binds_for_limits:
max_row = sql.literal_column("%d" % max_row)
limitselect.append_whereclause(
sql.literal_column("ROWNUM")<=max_row)
# If needed, add the ora_rn, and wrap again with offset.
if select._offset is None:
limitselect.for_update = select.for_update
select = limitselect
else:
limitselect = limitselect.column(
sql.literal_column("ROWNUM").label("ora_rn"))
limitselect._oracle_visit = True
limitselect._is_wrapper = True
offsetselect = sql.select(
[c for c in limitselect.c if c.key!='ora_rn'])
offsetselect._oracle_visit = True
offsetselect._is_wrapper = True
offset_value = select._offset
if not self.dialect.use_binds_for_limits:
offset_value = sql.literal_column("%d" % offset_value)
offsetselect.append_whereclause(
sql.literal_column("ora_rn")>offset_value)
offsetselect.for_update = select.for_update
select = offsetselect
kwargs['iswrapper'] = getattr(select, '_is_wrapper', False)
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
def limit_clause(self, select):
return ""
def for_update_clause(self, select):
if self.is_subquery():
return ""
elif select.for_update == "nowait":
return " FOR UPDATE NOWAIT"
else:
return super(OracleCompiler, self).for_update_clause(select)
class OracleDDLCompiler(compiler.DDLCompiler):
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
# oracle has no ON UPDATE CASCADE -
# its only available via triggers http://asktom.oracle.com/tkyte/update_cascade/index.html
if constraint.onupdate is not None:
util.warn(
"Oracle does not contain native UPDATE CASCADE "
"functionality - onupdates will not be rendered for foreign keys. "
"Consider using deferrable=True, initially='deferred' or triggers.")
return text
class OracleIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set([x.lower() for x in RESERVED_WORDS])
    # initial characters that force quoting of a bind parameter name; the digits must be
    # stored as strings since they are compared against value[0], a single character
    illegal_initial_characters = set(str(dig) for dig in xrange(0, 10)).union(["_", "$"])
def _bindparam_requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(unicode(value))
)
def format_savepoint(self, savepoint):
name = re.sub(r'^_+', '', savepoint.ident)
return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name)
class OracleExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar("SELECT " +
self.dialect.identifier_preparer.format_sequence(seq) +
".nextval FROM DUAL", type_)
class OracleDialect(default.DefaultDialect):
name = 'oracle'
supports_alter = True
supports_unicode_statements = False
supports_unicode_binds = False
max_identifier_length = 30
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_sequences = True
sequences_optional = False
postfetch_lastrowid = False
default_paramstyle = 'named'
colspecs = colspecs
ischema_names = ischema_names
requires_name_normalize = True
supports_default_values = False
supports_empty_insert = False
statement_compiler = OracleCompiler
ddl_compiler = OracleDDLCompiler
type_compiler = OracleTypeCompiler
preparer = OracleIdentifierPreparer
execution_ctx_cls = OracleExecutionContext
reflection_options = ('oracle_resolve_synonyms', )
def __init__(self,
use_ansi=True,
optimize_limits=False,
use_binds_for_limits=True,
**kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.use_ansi = use_ansi
self.optimize_limits = optimize_limits
self.use_binds_for_limits = use_binds_for_limits
def initialize(self, connection):
super(OracleDialect, self).initialize(connection)
self.implicit_returning = self.__dict__.get(
'implicit_returning',
self.server_version_info > (10, )
)
if self._is_oracle_8:
self.colspecs = self.colspecs.copy()
self.colspecs.pop(sqltypes.Interval)
self.use_ansi = False
@property
def _is_oracle_8(self):
return self.server_version_info and \
self.server_version_info < (9, )
@property
def _supports_char_length(self):
return not self._is_oracle_8
@property
def _supports_nchar(self):
return not self._is_oracle_8
def do_release_savepoint(self, connection, name):
# Oracle does not support RELEASE SAVEPOINT
pass
def has_table(self, connection, table_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text("SELECT table_name FROM all_tables "
"WHERE table_name = :name AND owner = :schema_name"),
name=self.denormalize_name(table_name), schema_name=self.denormalize_name(schema))
return cursor.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text("SELECT sequence_name FROM all_sequences "
"WHERE sequence_name = :name AND sequence_owner = :schema_name"),
name=self.denormalize_name(sequence_name), schema_name=self.denormalize_name(schema))
return cursor.first() is not None
def normalize_name(self, name):
if name is None:
return None
# Py2K
if isinstance(name, str):
name = name.decode(self.encoding)
# end Py2K
if name.upper() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
else:
return name
def denormalize_name(self, name):
if name is None:
return None
elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()):
name = name.upper()
# Py2K
if not self.supports_unicode_binds:
name = name.encode(self.encoding)
else:
name = unicode(name)
# end Py2K
return name
def _get_default_schema_name(self, connection):
return self.normalize_name(connection.execute(u'SELECT USER FROM DUAL').scalar())
def _resolve_synonym(self, connection, desired_owner=None, desired_synonym=None, desired_table=None):
"""search for a local synonym matching the given desired owner/name.
if desired_owner is None, attempts to locate a distinct owner.
returns the actual name, owner, dblink name, and synonym name if found.
"""
q = "SELECT owner, table_owner, table_name, db_link, synonym_name FROM all_synonyms WHERE "
clauses = []
params = {}
if desired_synonym:
clauses.append("synonym_name = :synonym_name")
params['synonym_name'] = desired_synonym
if desired_owner:
clauses.append("table_owner = :desired_owner")
params['desired_owner'] = desired_owner
if desired_table:
clauses.append("table_name = :tname")
params['tname'] = desired_table
q += " AND ".join(clauses)
result = connection.execute(sql.text(q), **params)
if desired_owner:
row = result.first()
if row:
return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
else:
return None, None, None, None
else:
rows = result.fetchall()
if len(rows) > 1:
                raise AssertionError("There are multiple tables visible to the schema; you must specify an owner")
elif len(rows) == 1:
row = rows[0]
return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
else:
return None, None, None, None
@reflection.cache
def _prepare_reflection_args(self, connection, table_name, schema=None,
resolve_synonyms=False, dblink='', **kw):
if resolve_synonyms:
actual_name, owner, dblink, synonym = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(schema),
desired_synonym=self.denormalize_name(table_name)
)
else:
actual_name, owner, dblink, synonym = None, None, None, None
if not actual_name:
actual_name = self.denormalize_name(table_name)
if not dblink:
dblink = ''
if not owner:
owner = self.denormalize_name(schema or self.default_schema_name)
return (actual_name, owner, dblink, synonym)
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "SELECT username FROM all_users ORDER BY username"
cursor = connection.execute(s,)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
        # note that table_names() isn't loading DBLINKed or synonym'ed tables
if schema is None:
schema = self.default_schema_name
s = sql.text(
"SELECT table_name FROM all_tables "
"WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') "
"AND OWNER = :owner "
"AND IOT_NAME IS NULL")
cursor = connection.execute(s, owner=schema)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner")
cursor = connection.execute(s, owner=self.denormalize_name(schema))
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
columns = []
if self._supports_char_length:
char_length_col = 'char_length'
else:
char_length_col = 'data_length'
c = connection.execute(sql.text(
"SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, "
"nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "
"WHERE table_name = :table_name AND owner = :owner "
"ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}),
table_name=table_name, owner=schema)
for row in c:
(colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
(self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5]=='Y', row[6])
if coltype == 'NUMBER' :
coltype = NUMBER(precision, scale)
elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
coltype = self.ischema_names.get(coltype)(length)
elif 'WITH TIME ZONE' in coltype:
coltype = TIMESTAMP(timezone=True)
else:
coltype = re.sub(r'\(\d+\)', '', coltype)
try:
coltype = self.ischema_names[coltype]
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(coltype, colname))
coltype = sqltypes.NULLTYPE
cdict = {
'name': colname,
'type': coltype,
'nullable': nullable,
'default': default,
'autoincrement':default is None
}
if orig_colname.lower() == orig_colname:
cdict['quote'] = True
columns.append(cdict)
return columns
@reflection.cache
def get_indexes(self, connection, table_name, schema=None,
resolve_synonyms=False, dblink='', **kw):
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
indexes = []
q = sql.text("""
SELECT a.index_name, a.column_name, b.uniqueness
FROM ALL_IND_COLUMNS%(dblink)s a,
ALL_INDEXES%(dblink)s b
WHERE
a.index_name = b.index_name
AND a.table_owner = b.table_owner
AND a.table_name = b.table_name
AND a.table_name = :table_name
AND a.table_owner = :schema
ORDER BY a.index_name, a.column_position""" % {'dblink': dblink})
rp = connection.execute(q, table_name=self.denormalize_name(table_name),
schema=self.denormalize_name(schema))
indexes = []
last_index_name = None
pkeys = self.get_primary_keys(connection, table_name, schema,
resolve_synonyms=resolve_synonyms,
dblink=dblink,
info_cache=kw.get('info_cache'))
uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)
def upper_name_set(names):
return set([i.upper() for i in names])
pk_names = upper_name_set(pkeys)
def remove_if_primary_key(index):
# don't include the primary key index
if index is not None and \
upper_name_set(index['column_names']) == pk_names:
indexes.pop()
index = None
for rset in rp:
if rset.index_name != last_index_name:
remove_if_primary_key(index)
index = dict(name=self.normalize_name(rset.index_name), column_names=[])
indexes.append(index)
index['unique'] = uniqueness.get(rset.uniqueness, False)
# filter out Oracle SYS_NC names. could also do an outer join
# to the all_tab_columns table and check for real col names there.
if not oracle_sys_col.match(rset.column_name):
index['column_names'].append(self.normalize_name(rset.column_name))
last_index_name = rset.index_name
remove_if_primary_key(index)
return indexes
@reflection.cache
def _get_constraint_data(self, connection, table_name, schema=None,
dblink='', **kw):
rp = connection.execute(
sql.text("""SELECT
ac.constraint_name,
ac.constraint_type,
loc.column_name AS local_column,
rem.table_name AS remote_table,
rem.column_name AS remote_column,
rem.owner AS remote_owner,
loc.position as loc_pos,
rem.position as rem_pos
FROM all_constraints%(dblink)s ac,
all_cons_columns%(dblink)s loc,
all_cons_columns%(dblink)s rem
WHERE ac.table_name = :table_name
AND ac.constraint_type IN ('R','P')
AND ac.owner = :owner
AND ac.owner = loc.owner
AND ac.constraint_name = loc.constraint_name
AND ac.r_owner = rem.owner(+)
AND ac.r_constraint_name = rem.constraint_name(+)
AND (rem.position IS NULL or loc.position=rem.position)
ORDER BY ac.constraint_name, loc.position""" % {'dblink': dblink}),
table_name=table_name, owner=schema)
constraint_data = rp.fetchall()
return constraint_data
def get_primary_keys(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
return self._get_primary_keys(connection, table_name, schema, **kw)[0]
@reflection.cache
def _get_primary_keys(self, connection, table_name, schema=None, **kw):
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
pkeys = []
constraint_name = None
constraint_data = self._get_constraint_data(connection, table_name,
schema, dblink,
info_cache=kw.get('info_cache'))
for row in constraint_data:
#print "ROW:" , row
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == 'P':
if constraint_name is None:
constraint_name = self.normalize_name(cons_name)
pkeys.append(local_column)
return pkeys, constraint_name
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
cols, name = self._get_primary_keys(connection, table_name, schema=schema, **kw)
return {
'constrained_columns':cols,
'name':name
}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
requested_schema = schema # to check later on
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
constraint_data = self._get_constraint_data(connection, table_name,
schema, dblink,
info_cache=kw.get('info_cache'))
def fkey_rec():
return {
'name' : None,
'constrained_columns' : [],
'referred_schema' : None,
'referred_table' : None,
'referred_columns' : []
}
fkeys = util.defaultdict(fkey_rec)
for row in constraint_data:
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == 'R':
if remote_table is None:
# ticket 363
util.warn(
("Got 'None' querying 'table_name' from "
"all_cons_columns%(dblink)s - does the user have "
"proper rights to the table?") % {'dblink':dblink})
continue
rec = fkeys[cons_name]
rec['name'] = cons_name
local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']
if not rec['referred_table']:
if resolve_synonyms:
ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(remote_owner),
desired_table=self.denormalize_name(remote_table)
)
if ref_synonym:
remote_table = self.normalize_name(ref_synonym)
remote_owner = self.normalize_name(ref_remote_owner)
rec['referred_table'] = remote_table
if requested_schema is not None or self.denormalize_name(remote_owner) != schema:
rec['referred_schema'] = remote_owner
local_cols.append(local_column)
remote_cols.append(remote_column)
return fkeys.values()
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None,
resolve_synonyms=False, dblink='', **kw):
info_cache = kw.get('info_cache')
(view_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, view_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
s = sql.text("""
SELECT text FROM all_views
WHERE owner = :schema
AND view_name = :view_name
""")
rp = connection.execute(s,
view_name=view_name, schema=schema).scalar()
if rp:
return rp.decode(self.encoding)
else:
return None
class _OuterJoinColumn(sql.ClauseElement):
__visit_name__ = 'outer_join_column'
def __init__(self, column):
self.column = column
|
|
"""Fuzzy Linear Discriminant Analysis"""
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.utils.validation import check_is_fitted
from scipy import linalg
from sklearn.decomposition import PCA
from sklearn.utils.extmath import softmax
from sklearn.base import BaseEstimator, TransformerMixin
# LinearClassifierMixin lives in sklearn.linear_model._base from scikit-learn 0.22
# onwards (e.g. 0.24.2); on older versions import it from sklearn.linear_model.base.
from sklearn.linear_model._base import LinearClassifierMixin
EPSILON = 1E-6
def class_freqs_df(y):
cc_df = pd.DataFrame.from_dict(y).fillna(value=0.)
# tot = cc_df.values.sum()
res = cc_df.sum(axis=0)
res /= res.sum()
# res.columns = ['class_freqs'] # apparently, not needed
return res
def rescale_dict(dictionary, scalar):
res_dict = {}
res_dict.update((key, value * float(scalar)) for (key, value) in list(dictionary.items()))
return res_dict
def class_freqs(y, sample_weight):
"""Returns a dict of class frequencies
"""
weighted_dict = []
for scalar, dictionary in zip(sample_weight, y):
weighted_dict.append(rescale_dict(dictionary, scalar))
cc = Counter()
for item in weighted_dict:
cc.update(item)
tot_weight = np.sum(sample_weight)
return rescale_dict(cc, 1. / tot_weight)
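# Illustrative example (values are made up, not from the original module):
#   class_freqs([{'a': 1.0}, {'a': 0.5, 'b': 0.5}], np.array([1.0, 1.0]))
# returns {'a': 0.75, 'b': 0.25} - per-sample fuzzy memberships are weighted,
# summed across samples and normalised by the total sample weight.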
def get_projection_dimension(X, tol=1e-08):
pca = PCA(n_components=X.shape[1])
pca.fit(X)
cov = pca.get_covariance()
eigs, _ = np.linalg.eig(cov)
n_dim_proj = np.sum(np.abs(eigs) > tol)
return n_dim_proj
def _preprocess(X, y, sample_weight):
all_labels = list(set().union(*(list(d.keys()) for d in y)))
return pd.DataFrame(X), pd.DataFrame.from_dict(list(y)).fillna(value=0.), \
pd.Series(sample_weight, name='X_weight'), \
all_labels, \
len(all_labels)
def lda_decision_function(class_freqs, class_means, covar, X):
    # Discriminant scores delta_k(x) = x' S^-1 m_k - 0.5 m_k' S^-1 m_k + log(pi_k)
    # for each sample in X and each class k.  Note that ``covar`` is divided in
    # place by (n_samples - n_classes) to form the pooled covariance estimate.
    covar /= (X.shape[0] - class_means.shape[0])
    right_term = np.dot(np.linalg.inv(covar), class_means.T)
    linear_term = np.dot(X, right_term)
    bilin_term = np.diagonal(0.5 * np.dot(class_means, right_term))
    log_term = np.log(class_freqs)
    return linear_term - bilin_term + log_term
class FuzzyLDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
def __init__(self, solver='eigen', n_components=None):
self.solver = solver
self.n_components = n_components
def _solve_eigen(self, X, y, sample_weight):
"""
Eigenvalue solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
sample_weight : array-like, shape (n_samples,)
y : list of dicts, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,)
"""
self.means_ = self._class_means(X, y, sample_weight)
self.covariance_ = self.within_scatter_matrix(X, y, sample_weight)
Sw = self.covariance_ # within scatter
Sb = self.between_classes_scatter(X, y, sample_weight) # between scatter
if np.linalg.matrix_rank(Sw) < Sw.shape[0]:
Sw += EPSILON * np.eye(Sw.shape[0])
evals, evecs = linalg.eig(Sb, Sw, right=True) # eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
evecs /= np.linalg.norm(evecs, axis=0)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
# self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + TODO
# np.log(self.priors_))
def _class_means(self, X, y, sample_weight):
"""
Compute weighted class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : list of dicts, shape (n_samples,)
Labels.
sample_weight : array-like (n_samples)
Weights of the data points.
Returns
-------
        means : list of length n_classes
            Weighted mean (length n_features) for each label, in the order of
            ``self.all_labels``.  Also stored as the DataFrame ``self.means_df``.
"""
means = []
for index, label in enumerate(self.all_labels):
means.append(
(self.X_df.mul(self.df_weights, axis=0)).mul(self.y_df[label], axis=0)[self.y_df[label] > 0.].mean(
axis=0))
means_array = np.array(means)
self.means_df = pd.DataFrame(means_array, index=self.all_labels)
return means
def _class_weights(self, X, y, sample_weight):
"""
Compute total weights for each class.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : list of dicts, shape (n_samples,)
Labels.
sample_weight : array-like (n_samples)
Weights of the data points.
Returns
-------
class_weights : list
Total weight for each class.
"""
weights = []
for index, label in enumerate(self.all_labels):
weights.append(self.df_weights.mul(self.y_df[label], axis=0)[self.y_df[label] > 0.].sum(axis=0))
weights_array = np.array(weights)
return weights_array
def within_scatter_matrix(self, X, y, sample_weight):
"""computes the within scatter matrix S_w
"""
within_scatter_matrix = []
for label in self.all_labels:
within_scatter_matrix.append(
np.cov((self.X_df[self.y_df[label] > 0.].mul(np.sqrt(self.df_weights[self.y_df[label] > 0.]), axis=0)) \
.mul(np.sqrt(self.y_df[label][self.y_df[label] > 0.]), axis=0).values.T, bias=1))
return np.array(within_scatter_matrix).mean(axis=0)
def within_scatter_matrix_list(self, X, y, sample_weight):
"""computes the within scatter matrix S_w
"""
within_scatter_matrix = []
for label in self.all_labels:
within_scatter_matrix.append(
np.cov((self.X_df[self.y_df[label] > 0.].mul(np.sqrt(self.df_weights[self.y_df[label] > 0.]), axis=0)) \
.mul(np.sqrt(self.y_df[label][self.y_df[label] > 0.]), axis=0).values.T, bias=1))
return np.array(within_scatter_matrix)
def between_classes_scatter(self, X, y, sample_weight):
overall_mean = X.mean(axis=0)
mean_vectors = pd.DataFrame(self._class_means(X, y, sample_weight), index=self.all_labels)
mean_vectors -= overall_mean
sq_weights = np.sqrt(self.class_weights_df)[0]
        res = mean_vectors.mul(sq_weights, axis='index')
Sb_list = []
for label in self.all_labels:
Sb_list.append(np.outer(res.loc[label].values, res.loc[label].values))
# Sb = np.cov((res).values.T, bias =1)
self.Sb = np.sum(Sb_list, axis=0)
return np.sum(Sb_list, axis=0)
def fit(self, X, y, sample_weight=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : list of dicts, shape (n_samples,)
Labels.
sample_weight : array-like (n_samples)
Weights of the data points.
"""
if sample_weight is None:
sample_weight = np.ones(len(y))
if not isinstance(y[0], dict): # check if the first element is a dict
# if it's not, then go through every element and replace with {element: 1.0} if element not a dict
new_y = list()
for yy in y:
if not isinstance(yy, dict):
new_y.append({yy: 1.0})
else:
new_y.append(yy)
y = np.array(new_y)
self.classes_ = list(set().union(*(list(d.keys()) for d in y)))
self.X_df, self.y_df, self.df_weights, self.all_labels, self.num_labels = \
_preprocess(X, y, sample_weight)
self.class_weights_df = pd.DataFrame(self._class_weights(X, y, sample_weight), index=self.all_labels)
self.class_freqs_df = class_freqs_df(y)
# Get the maximum number of components
n_dimensions = get_projection_dimension(X)
        if self.n_components is None:
            self._max_components = min(len(self.classes_) - 1, n_dimensions)
        else:
            # requested n_components, capped by the effective data dimensionality
            self._max_components = min(self.n_components, n_dimensions)
self.extract_more_dim(X, y, sample_weight, self._max_components)
        if self.solver is None or self.solver == 'eigen':
self._solve_eigen(X, y, sample_weight)
else:
raise ValueError("unknown solver {} (valid solvers are None, "
"and 'eigen').".format(self.solver))
if len(self.classes_) == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
# self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
# ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, ['scalings_'], all_or_any=any)
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components] # done
def predict_proba(self, X):
"""Assign new point to a class.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
probas : array, shape (n_samples, n_classes)
Predicted probas.
"""
check_is_fitted(self, ['scalings_'], all_or_any=any)
covar = np.array(self.covariance_)
class_means = np.array(self.means_df)
class_freqs = np.array(self.class_freqs_df)
prob = lda_decision_function(class_freqs, class_means, covar, X)
self.prob = prob # Testing purposes
# y_predict = np.argmax(lda_decision_function(class_freqs, class_means, covar, X), axis = 1)
# np.exp(prob, prob)
# prob += 1
# np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob = softmax(prob)
return prob
    def predict(self, X, sample_weight=None):
        # predict_proba() takes only X, so sample_weight is accepted for API
        # symmetry but is not used here.
        return np.argmax(self.predict_proba(X), axis=1)
def extract_more_dim(self, X, y, sample_weight, n_dim):
assert len(X[0]) >= n_dim, "n_dim cannot be larger than the number of features"
n_labels = len(self.classes_)
n_projections, remainder = divmod(n_dim, n_labels - 1)
scalings = list()
while n_projections > 0:
n_projections -= 1
FLDA = FuzzyLDA(n_components=n_labels - 1)
FLDA.fit(X, y, sample_weight)
X = X - np.dot(np.dot(X, FLDA.scalings_), np.transpose(FLDA.scalings_))
scalings.append(FLDA.scalings_[:, :n_labels - 1])
if remainder > 0:
FLDA_remainder = FuzzyLDA(n_components=remainder)
FLDA_remainder.fit(X, y, sample_weight)
scalings.append(FLDA_remainder.scalings_[:, :remainder])
self.scalings_ = np.hstack(scalings)
def sphericize(X, y, sample_weight=None):
"""Make the dataset spherical.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : list of dicts, shape (n_samples,)
Labels.
sample_weight : array-like (n_samples)
Weights of the data points.
"""
if sample_weight is None:
sample_weight = np.ones(len(y))
fuz = FuzzyLDA().fit(X, y, sample_weight)
W = fuz.within_scatter_matrix(X, y, sample_weight)
eigenvals, eigenvecs = np.linalg.eig(W)
D = np.diag(1 / np.sqrt(eigenvals))
P = np.dot(eigenvecs, D)
return np.dot(X, P)
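# Usage sketch (illustrative only, not part of the original module; assumes
# the module-level numpy import used as ``np`` throughout this file). Labels
# may be plain values or dicts of fuzzy class memberships; ``fit`` normalises
# plain values to ``{label: 1.0}``:
#
#     X_demo = np.array([[0.0, 1.0], [0.1, 0.9], [1.0, 0.0], [0.9, 0.2]])
#     y_demo = ['a', {'a': 0.7, 'b': 0.3}, 'b', 'b']
#     flda = FuzzyLDA(n_components=1).fit(X_demo, y_demo)
#     X_proj = flda.transform(X_demo)        # projected data, shape (4, 1)
#     X_sphere = sphericize(X_demo, y_demo)  # whitened w.r.t. the within scatter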
|
|
from django.shortcuts import get_object_or_404, render
from django.http import Http404
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin.modal_workflow import render_modal_workflow
from wagtail.wagtailadmin.forms import (
SearchForm, ExternalLinkChooserForm, ExternalLinkChooserWithLinkTextForm,
EmailLinkChooserForm, EmailLinkChooserWithLinkTextForm)
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.utils import resolve_model_string
def shared_context(request, extra_context=None):
    # Use None instead of a mutable default argument.
    context = {
        'allow_external_link': request.GET.get('allow_external_link'),
        'allow_email_link': request.GET.get('allow_email_link'),
    }
    if extra_context:
        context.update(extra_context)
    return context
def page_models_from_string(string):
page_models = []
for sub_string in string.split(','):
page_model = resolve_model_string(sub_string)
if not issubclass(page_model, Page):
raise ValueError("Model is not a page")
page_models.append(page_model)
return tuple(page_models)
def filter_page_type(queryset, page_models):
qs = queryset.none()
for model in page_models:
qs |= queryset.type(model)
return qs
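# Example (hypothetical model names, for illustration only):
#
#     models = page_models_from_string('blog.BlogPage,news.NewsPage')
#     pages = filter_page_type(Page.objects.all(), models)
#
# page_models_from_string() raises ValueError for models that are not Page
# subclasses and LookupError (via resolve_model_string) for unknown model
# strings; the views below translate both into a 404.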
def browse(request, parent_page_id=None):
# Find parent page
if parent_page_id:
parent_page = get_object_or_404(Page, id=parent_page_id)
else:
parent_page = Page.get_first_root_node()
# Get children of parent page
pages = parent_page.get_children()
# Filter them by page type
# A missing or empty page_type parameter indicates 'all page types' (i.e. descendants of wagtailcore.page)
page_type_string = request.GET.get('page_type') or 'wagtailcore.page'
if page_type_string != 'wagtailcore.page':
try:
desired_classes = page_models_from_string(page_type_string)
except (ValueError, LookupError):
raise Http404
# restrict the page listing to just those pages that:
# - are of the given content type (taking into account class inheritance)
# - or can be navigated into (i.e. have children)
choosable_pages = filter_page_type(pages, desired_classes)
descendable_pages = pages.filter(numchild__gt=0)
pages = choosable_pages | descendable_pages
else:
desired_classes = (Page, )
can_choose_root = request.GET.get('can_choose_root', False)
    # Parent page can be chosen if it is an instance of desired_classes
parent_page.can_choose = (
issubclass(parent_page.specific_class or Page, desired_classes) and
(can_choose_root or not parent_page.is_root())
)
# Pagination
# We apply pagination first so we don't need to walk the entire list
# in the block below
paginator, pages = paginate(request, pages, per_page=25)
    # Annotate each page with can_choose/can_descend flags
for page in pages:
if desired_classes == (Page, ):
page.can_choose = True
else:
page.can_choose = issubclass(page.specific_class or Page, desired_classes)
page.can_descend = page.get_children_count()
# Render
return render_modal_workflow(
request,
'wagtailadmin/chooser/browse.html', 'wagtailadmin/chooser/browse.js',
shared_context(request, {
'parent_page': parent_page,
'pages': pages,
'search_form': SearchForm(),
'page_type_string': page_type_string,
'page_type_names': [desired_class.get_verbose_name() for desired_class in desired_classes],
'page_types_restricted': (page_type_string != 'wagtailcore.page')
})
)
def search(request, parent_page_id=None):
# A missing or empty page_type parameter indicates 'all page types' (i.e. descendants of wagtailcore.page)
page_type_string = request.GET.get('page_type') or 'wagtailcore.page'
try:
desired_classes = page_models_from_string(page_type_string)
except (ValueError, LookupError):
raise Http404
search_form = SearchForm(request.GET)
if search_form.is_valid() and search_form.cleaned_data['q']:
pages = Page.objects.exclude(
depth=1 # never include root
)
pages = filter_page_type(pages, desired_classes)
pages = pages.search(search_form.cleaned_data['q'], fields=['title'])
else:
pages = Page.objects.none()
paginator, pages = paginate(request, pages, per_page=25)
for page in pages:
page.can_choose = True
return render(
request, 'wagtailadmin/chooser/_search_results.html',
shared_context(request, {
'searchform': search_form,
'pages': pages,
'page_type_string': page_type_string,
})
)
def external_link(request):
prompt_for_link_text = bool(request.GET.get('prompt_for_link_text'))
if prompt_for_link_text:
form_class = ExternalLinkChooserWithLinkTextForm
else:
form_class = ExternalLinkChooserForm
if request.POST:
form = form_class(request.POST)
if form.is_valid():
return render_modal_workflow(
request,
None, 'wagtailadmin/chooser/external_link_chosen.js',
{
'url': form.cleaned_data['url'],
'link_text': form.cleaned_data['link_text'] if prompt_for_link_text else form.cleaned_data['url']
}
)
else:
form = form_class()
return render_modal_workflow(
request,
'wagtailadmin/chooser/external_link.html', 'wagtailadmin/chooser/external_link.js',
shared_context(request, {
'form': form,
})
)
def email_link(request):
prompt_for_link_text = bool(request.GET.get('prompt_for_link_text'))
if prompt_for_link_text:
form_class = EmailLinkChooserWithLinkTextForm
else:
form_class = EmailLinkChooserForm
if request.POST:
form = form_class(request.POST)
if form.is_valid():
return render_modal_workflow(
request,
None, 'wagtailadmin/chooser/external_link_chosen.js',
{
'url': 'mailto:' + form.cleaned_data['email_address'],
'link_text': form.cleaned_data['link_text'] if (
prompt_for_link_text and form.cleaned_data['link_text']
) else form.cleaned_data['email_address']
}
)
else:
form = form_class()
return render_modal_workflow(
request,
'wagtailadmin/chooser/email_link.html', 'wagtailadmin/chooser/email_link.js',
shared_context(request, {
'form': form,
})
)
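# These views are registered by the Wagtail admin's own urlconf; a minimal
# sketch of equivalent wiring (URL names and prefixes are illustrative only):
#
#     from django.conf.urls import url
#     from wagtail.wagtailadmin.views import chooser
#
#     urlpatterns = [
#         url(r'^choose-page/$', chooser.browse, name='wagtailadmin_choose_page'),
#         url(r'^choose-page/(\d+)/$', chooser.browse, name='wagtailadmin_choose_page_child'),
#         url(r'^choose-page/search/$', chooser.search, name='wagtailadmin_choose_page_search'),
#         url(r'^choose-external-link/$', chooser.external_link, name='wagtailadmin_choose_page_external_link'),
#         url(r'^choose-email-link/$', chooser.email_link, name='wagtailadmin_choose_page_email_link'),
#     ]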
|
|
# file openpyxl/style.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Style and formatting option tracking."""
# Python stdlib imports
import re
try:
from hashlib import md5
except ImportError:
from md5 import md5
class HashableObject(object):
"""Define how to hash property classes."""
__fields__ = None
__leaf__ = False
def __repr__(self):
return ':'.join([repr(getattr(self, x)) for x in self.__fields__])
def __hash__(self):
# return int(md5(repr(self)).hexdigest(), 16)
return hash(repr(self))
class Color(HashableObject):
"""Named colors for use in styles."""
BLACK = 'FF000000'
WHITE = 'FFFFFFFF'
RED = 'FFFF0000'
DARKRED = 'FF800000'
BLUE = 'FF0000FF'
DARKBLUE = 'FF000080'
GREEN = 'FF00FF00'
DARKGREEN = 'FF008000'
YELLOW = 'FFFFFF00'
DARKYELLOW = 'FF808000'
__fields__ = ('index',)
__slots__ = __fields__
__leaf__ = True
def __init__(self, index):
super(Color, self).__init__()
self.index = index
class Font(HashableObject):
"""Font options used in styles."""
UNDERLINE_NONE = 'none'
UNDERLINE_DOUBLE = 'double'
UNDERLINE_DOUBLE_ACCOUNTING = 'doubleAccounting'
UNDERLINE_SINGLE = 'single'
UNDERLINE_SINGLE_ACCOUNTING = 'singleAccounting'
__fields__ = ('name',
'size',
'bold',
'italic',
'superscript',
'subscript',
'underline',
'strikethrough',
'color')
__slots__ = __fields__
def __init__(self):
super(Font, self).__init__()
self.name = 'Calibri'
self.size = 11
self.bold = False
self.italic = False
self.superscript = False
self.subscript = False
self.underline = self.UNDERLINE_NONE
self.strikethrough = False
self.color = Color(Color.BLACK)
class Fill(HashableObject):
"""Area fill patterns for use in styles."""
FILL_NONE = 'none'
FILL_SOLID = 'solid'
FILL_GRADIENT_LINEAR = 'linear'
FILL_GRADIENT_PATH = 'path'
FILL_PATTERN_DARKDOWN = 'darkDown'
FILL_PATTERN_DARKGRAY = 'darkGray'
FILL_PATTERN_DARKGRID = 'darkGrid'
FILL_PATTERN_DARKHORIZONTAL = 'darkHorizontal'
FILL_PATTERN_DARKTRELLIS = 'darkTrellis'
FILL_PATTERN_DARKUP = 'darkUp'
FILL_PATTERN_DARKVERTICAL = 'darkVertical'
FILL_PATTERN_GRAY0625 = 'gray0625'
FILL_PATTERN_GRAY125 = 'gray125'
FILL_PATTERN_LIGHTDOWN = 'lightDown'
FILL_PATTERN_LIGHTGRAY = 'lightGray'
FILL_PATTERN_LIGHTGRID = 'lightGrid'
FILL_PATTERN_LIGHTHORIZONTAL = 'lightHorizontal'
FILL_PATTERN_LIGHTTRELLIS = 'lightTrellis'
FILL_PATTERN_LIGHTUP = 'lightUp'
FILL_PATTERN_LIGHTVERTICAL = 'lightVertical'
FILL_PATTERN_MEDIUMGRAY = 'mediumGray'
__fields__ = ('fill_type',
'rotation',
'start_color',
'end_color')
__slots__ = __fields__
def __init__(self):
super(Fill, self).__init__()
self.fill_type = self.FILL_NONE
self.rotation = 0
self.start_color = Color(Color.WHITE)
self.end_color = Color(Color.BLACK)
class Border(HashableObject):
"""Border options for use in styles."""
BORDER_NONE = 'none'
BORDER_DASHDOT = 'dashDot'
BORDER_DASHDOTDOT = 'dashDotDot'
BORDER_DASHED = 'dashed'
BORDER_DOTTED = 'dotted'
BORDER_DOUBLE = 'double'
BORDER_HAIR = 'hair'
BORDER_MEDIUM = 'medium'
BORDER_MEDIUMDASHDOT = 'mediumDashDot'
BORDER_MEDIUMDASHDOTDOT = 'mediumDashDotDot'
BORDER_MEDIUMDASHED = 'mediumDashed'
BORDER_SLANTDASHDOT = 'slantDashDot'
BORDER_THICK = 'thick'
BORDER_THIN = 'thin'
__fields__ = ('border_style',
'color')
__slots__ = __fields__
def __init__(self):
super(Border, self).__init__()
self.border_style = self.BORDER_NONE
self.color = Color(Color.BLACK)
class Borders(HashableObject):
"""Border positioning for use in styles."""
DIAGONAL_NONE = 0
DIAGONAL_UP = 1
DIAGONAL_DOWN = 2
DIAGONAL_BOTH = 3
__fields__ = ('left',
'right',
'top',
'bottom',
'diagonal',
'diagonal_direction',
'all_borders',
'outline',
'inside',
'vertical',
'horizontal')
__slots__ = __fields__
def __init__(self):
super(Borders, self).__init__()
self.left = Border()
self.right = Border()
self.top = Border()
self.bottom = Border()
self.diagonal = Border()
self.diagonal_direction = self.DIAGONAL_NONE
self.all_borders = Border()
self.outline = Border()
self.inside = Border()
self.vertical = Border()
self.horizontal = Border()
class Alignment(HashableObject):
"""Alignment options for use in styles."""
HORIZONTAL_GENERAL = 'general'
HORIZONTAL_LEFT = 'left'
HORIZONTAL_RIGHT = 'right'
HORIZONTAL_CENTER = 'center'
HORIZONTAL_CENTER_CONTINUOUS = 'centerContinuous'
HORIZONTAL_JUSTIFY = 'justify'
VERTICAL_BOTTOM = 'bottom'
VERTICAL_TOP = 'top'
VERTICAL_CENTER = 'center'
VERTICAL_JUSTIFY = 'justify'
__fields__ = ('horizontal',
'vertical',
'text_rotation',
'wrap_text',
'shrink_to_fit',
'indent')
__slots__ = __fields__
__leaf__ = True
def __init__(self):
super(Alignment, self).__init__()
self.horizontal = self.HORIZONTAL_GENERAL
self.vertical = self.VERTICAL_BOTTOM
self.text_rotation = 0
self.wrap_text = False
self.shrink_to_fit = False
self.indent = 0
class NumberFormat(HashableObject):
"""Numer formatting for use in styles."""
FORMAT_GENERAL = 'General'
FORMAT_TEXT = '@'
FORMAT_NUMBER = '0'
FORMAT_NUMBER_00 = '0.00'
FORMAT_NUMBER_COMMA_SEPARATED1 = '#,##0.00'
FORMAT_NUMBER_COMMA_SEPARATED2 = '#,##0.00_-'
FORMAT_PERCENTAGE = '0%'
FORMAT_PERCENTAGE_00 = '0.00%'
FORMAT_DATE_YYYYMMDD2 = 'yyyy-mm-dd'
FORMAT_DATE_YYYYMMDD = 'yy-mm-dd'
FORMAT_DATE_DDMMYYYY = 'dd/mm/yy'
FORMAT_DATE_DMYSLASH = 'd/m/y'
FORMAT_DATE_DMYMINUS = 'd-m-y'
FORMAT_DATE_DMMINUS = 'd-m'
FORMAT_DATE_MYMINUS = 'm-y'
FORMAT_DATE_XLSX14 = 'mm-dd-yy'
FORMAT_DATE_XLSX15 = 'd-mmm-yy'
FORMAT_DATE_XLSX16 = 'd-mmm'
FORMAT_DATE_XLSX17 = 'mmm-yy'
FORMAT_DATE_XLSX22 = 'm/d/yy h:mm'
FORMAT_DATE_DATETIME = 'd/m/y h:mm'
FORMAT_DATE_TIME1 = 'h:mm AM/PM'
FORMAT_DATE_TIME2 = 'h:mm:ss AM/PM'
FORMAT_DATE_TIME3 = 'h:mm'
FORMAT_DATE_TIME4 = 'h:mm:ss'
FORMAT_DATE_TIME5 = 'mm:ss'
FORMAT_DATE_TIME6 = 'h:mm:ss'
FORMAT_DATE_TIME7 = 'i:s.S'
FORMAT_DATE_TIME8 = 'h:mm:ss@'
FORMAT_DATE_YYYYMMDDSLASH = 'yy/mm/dd@'
FORMAT_CURRENCY_USD_SIMPLE = '"$"#,##0.00_-'
FORMAT_CURRENCY_USD = '$#,##0_-'
FORMAT_CURRENCY_EUR_SIMPLE = '[$EUR ]#,##0.00_-'
_BUILTIN_FORMATS = {
0: 'General',
1: '0',
2: '0.00',
3: '#,##0',
4: '#,##0.00',
9: '0%',
10: '0.00%',
11: '0.00E+00',
12: '# ?/?',
13: '# ??/??',
14: 'mm-dd-yy',
15: 'd-mmm-yy',
16: 'd-mmm',
17: 'mmm-yy',
18: 'h:mm AM/PM',
19: 'h:mm:ss AM/PM',
20: 'h:mm',
21: 'h:mm:ss',
22: 'm/d/yy h:mm',
37: '#,##0 (#,##0)',
38: '#,##0 [Red](#,##0)',
39: '#,##0.00(#,##0.00)',
40: '#,##0.00[Red](#,##0.00)',
41: '_(* #,##0_);_(* \(#,##0\);_(* "-"_);_(@_)',
42: '_("$"* #,##0_);_("$"* \(#,##0\);_("$"* "-"_);_(@_)',
43: '_(* #,##0.00_);_(* \(#,##0.00\);_(* "-"??_);_(@_)',
44: '_("$"* #,##0.00_)_("$"* \(#,##0.00\)_("$"* "-"??_)_(@_)',
45: 'mm:ss',
46: '[h]:mm:ss',
47: 'mmss.0',
48: '##0.0E+0',
49: '@', }
_BUILTIN_FORMATS_REVERSE = dict(
[(value, key) for key, value in _BUILTIN_FORMATS.items()])
__fields__ = ('_format_code',
'_format_index')
__slots__ = __fields__
__leaf__ = True
DATE_INDICATORS = 'dmyhs'
def __init__(self):
super(NumberFormat, self).__init__()
self._format_code = self.FORMAT_GENERAL
self._format_index = 0
def _set_format_code(self, format_code = FORMAT_GENERAL):
"""Setter for the format_code property."""
self._format_code = format_code
self._format_index = self.builtin_format_id(format = format_code)
def _get_format_code(self):
"""Getter for the format_code property."""
return self._format_code
format_code = property(_get_format_code, _set_format_code)
def builtin_format_code(self, index):
"""Return one of the standard format codes by index."""
return self._BUILTIN_FORMATS[index]
def is_builtin(self, format = None):
"""Check if a format code is a standard format code."""
if format is None:
format = self._format_code
return format in self._BUILTIN_FORMATS.values()
def builtin_format_id(self, format):
"""Return the id of a standard style."""
return self._BUILTIN_FORMATS_REVERSE.get(format, None)
def is_date_format(self, format = None):
"""Check if the number format is actually representing a date."""
if format is None:
format = self._format_code
return any([x in format for x in self.DATE_INDICATORS])
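    # Example (sketch): NumberFormat().is_builtin('0.00') is True and
    # builtin_format_id('0.00') returns 2, while is_date_format('yyyy-mm-dd')
    # is True because the code contains characters from DATE_INDICATORS.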
class Protection(HashableObject):
"""Protection options for use in styles."""
PROTECTION_INHERIT = 'inherit'
PROTECTION_PROTECTED = 'protected'
PROTECTION_UNPROTECTED = 'unprotected'
__fields__ = ('locked',
'hidden')
__slots__ = __fields__
__leaf__ = True
def __init__(self):
super(Protection, self).__init__()
self.locked = self.PROTECTION_INHERIT
self.hidden = self.PROTECTION_INHERIT
class Style(HashableObject):
"""Style object containing all formatting details."""
__fields__ = ('font',
'fill',
'borders',
'alignment',
'number_format',
'protection')
__slots__ = __fields__
def __init__(self):
super(Style, self).__init__()
self.font = Font()
self.fill = Fill()
self.borders = Borders()
self.alignment = Alignment()
self.number_format = NumberFormat()
self.protection = Protection()
DEFAULTS = Style()
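if __name__ == '__main__':
    # Minimal sketch (not part of the original module) showing how the style
    # classes above compose; the specific values are illustrative only.
    style = Style()
    style.font.bold = True
    style.font.color = Color(Color.DARKBLUE)
    style.fill.fill_type = Fill.FILL_SOLID
    style.fill.start_color = Color(Color.YELLOW)
    style.number_format.format_code = NumberFormat.FORMAT_PERCENTAGE_00
    # Styles hash and compare by their repr, so a customised style differs
    # from the module-level DEFAULTS instance.
    assert repr(style) != repr(DEFAULTS)
    print(repr(style))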
|
|
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/common.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import contextlib
import getpass
import http.server
import os
import re
import shutil
import socketserver
import stat
import subprocess
import sys
import tempfile
from core.tests import test_utils
import psutil
import python_utils
import release_constants
from . import common
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PY_GITHUB_PATH = os.path.join(
_PARENT_DIR, 'oppia_tools', 'PyGithub-%s' % common.PYGITHUB_VERSION)
sys.path.insert(0, _PY_GITHUB_PATH)
import github # isort:skip pylint: disable=wrong-import-position
class MockPsutilProcess(python_utils.OBJECT):
"""A mock class for Process class in Psutil."""
cmdlines = [
['dev_appserver.py', '--host', '0.0.0.0', '--port', '9001'],
['downloads']
]
def __init__(self, index):
"""Constructor for this mock object.
Args:
index: int. The index of process to be checked.
"""
self.index = index
def cmdline(self):
"""Return the command line of this process."""
pass
def kill(self):
"""Kill the process."""
pass
def is_running(self):
"""Check whether the function is running."""
return True
class CommonTests(test_utils.GenericTestBase):
"""Test the methods which handle common functionalities."""
def test_is_x64_architecture_in_x86(self):
maxsize_swap = self.swap(sys, 'maxsize', 1)
with maxsize_swap:
self.assertFalse(common.is_x64_architecture())
def test_is_x64_architecture_in_x64(self):
maxsize_swap = self.swap(sys, 'maxsize', 2**32 + 1)
with maxsize_swap:
self.assertTrue(common.is_x64_architecture())
def test_run_cmd(self):
self.assertEqual(
common.run_cmd(('echo Test for common.py ').split(' ')),
'Test for common.py')
def test_ensure_directory_exists_with_existing_dir(self):
check_function_calls = {
'makedirs_gets_called': False
}
def mock_makedirs(unused_dirpath):
check_function_calls['makedirs_gets_called'] = True
with self.swap(os, 'makedirs', mock_makedirs):
common.ensure_directory_exists('assets')
self.assertEqual(check_function_calls, {'makedirs_gets_called': False})
def test_ensure_directory_exists_with_non_existing_dir(self):
check_function_calls = {
'makedirs_gets_called': False
}
def mock_makedirs(unused_dirpath):
check_function_calls['makedirs_gets_called'] = True
with self.swap(os, 'makedirs', mock_makedirs):
common.ensure_directory_exists('test-dir')
self.assertEqual(check_function_calls, {'makedirs_gets_called': True})
def test_require_cwd_to_be_oppia_with_correct_cwd_and_unallowed_deploy_dir(
self):
common.require_cwd_to_be_oppia()
def test_require_cwd_to_be_oppia_with_correct_cwd_and_allowed_deploy_dir(
self):
common.require_cwd_to_be_oppia(allow_deploy_dir=True)
def test_require_cwd_to_be_oppia_with_wrong_cwd_and_unallowed_deploy_dir(
self):
def mock_getcwd():
return 'invalid'
getcwd_swap = self.swap(os, 'getcwd', mock_getcwd)
with getcwd_swap, self.assertRaisesRegexp(
Exception, 'Please run this script from the oppia/ directory.'):
common.require_cwd_to_be_oppia()
def test_require_cwd_to_be_oppia_with_wrong_cwd_and_allowed_deploy_dir(
self):
def mock_getcwd():
return 'invalid'
def mock_basename(unused_dirpath):
return 'deploy-dir'
def mock_isdir(unused_dirpath):
return True
getcwd_swap = self.swap(os, 'getcwd', mock_getcwd)
basename_swap = self.swap(os.path, 'basename', mock_basename)
isdir_swap = self.swap(os.path, 'isdir', mock_isdir)
with getcwd_swap, basename_swap, isdir_swap:
common.require_cwd_to_be_oppia(allow_deploy_dir=True)
def test_open_new_tab_in_browser_if_possible_with_user_manually_opening_url(
self):
try:
check_function_calls = {
'input_gets_called': 0,
'check_call_gets_called': False
}
expected_check_function_calls = {
'input_gets_called': 1,
'check_call_gets_called': False
}
def mock_call(unused_cmd_tokens):
return 0
def mock_check_call(unused_cmd_tokens):
check_function_calls['check_call_gets_called'] = True
def mock_input():
check_function_calls['input_gets_called'] += 1
return 'n'
call_swap = self.swap(subprocess, 'call', mock_call)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
input_swap = self.swap(python_utils, 'INPUT', mock_input)
with call_swap, check_call_swap, input_swap:
common.open_new_tab_in_browser_if_possible('test-url')
self.assertEqual(
check_function_calls, expected_check_function_calls)
finally:
common.USER_PREFERENCES['open_new_tab_in_browser'] = None
def test_open_new_tab_in_browser_if_possible_with_url_opening_correctly(
self):
try:
check_function_calls = {
'input_gets_called': 0,
'check_call_gets_called': False
}
expected_check_function_calls = {
'input_gets_called': 1,
'check_call_gets_called': True
}
def mock_call(unused_cmd_tokens):
return 0
def mock_check_call(unused_cmd_tokens):
check_function_calls['check_call_gets_called'] = True
def mock_input():
check_function_calls['input_gets_called'] += 1
return 'y'
call_swap = self.swap(subprocess, 'call', mock_call)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
input_swap = self.swap(python_utils, 'INPUT', mock_input)
with call_swap, check_call_swap, input_swap:
common.open_new_tab_in_browser_if_possible('test-url')
self.assertEqual(
check_function_calls, expected_check_function_calls)
finally:
common.USER_PREFERENCES['open_new_tab_in_browser'] = None
def test_open_new_tab_in_browser_if_possible_with_url_not_opening_correctly(
self):
try:
check_function_calls = {
'input_gets_called': 0,
'check_call_gets_called': False
}
expected_check_function_calls = {
'input_gets_called': 2,
'check_call_gets_called': False
}
def mock_call(unused_cmd_tokens):
return 1
def mock_check_call(unused_cmd_tokens):
check_function_calls['check_call_gets_called'] = True
def mock_input():
check_function_calls['input_gets_called'] += 1
return 'y'
call_swap = self.swap(subprocess, 'call', mock_call)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
input_swap = self.swap(python_utils, 'INPUT', mock_input)
with call_swap, check_call_swap, input_swap:
common.open_new_tab_in_browser_if_possible('test-url')
self.assertEqual(
check_function_calls, expected_check_function_calls)
finally:
common.USER_PREFERENCES['open_new_tab_in_browser'] = None
def test_get_remote_alias_with_correct_alias(self):
def mock_check_output(unused_cmd_tokens):
return 'remote1 url1\nremote2 url2'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.get_remote_alias('url1'), 'remote1')
def test_get_remote_alias_with_incorrect_alias(self):
def mock_check_output(unused_cmd_tokens):
return 'remote1 url1\nremote2 url2'
check_output_swap = self.swap(
subprocess, 'check_output', mock_check_output)
with check_output_swap, self.assertRaisesRegexp(
Exception,
'ERROR: There is no existing remote alias for the url3 repo.'):
common.get_remote_alias('url3')
def test_verify_local_repo_is_clean_with_clean_repo(self):
def mock_check_output(unused_cmd_tokens):
return 'nothing to commit, working directory clean'
with self.swap(
subprocess, 'check_output', mock_check_output):
common.verify_local_repo_is_clean()
def test_verify_local_repo_is_clean_with_unclean_repo(self):
def mock_check_output(unused_cmd_tokens):
return 'invalid'
check_output_swap = self.swap(
subprocess, 'check_output', mock_check_output)
with check_output_swap, self.assertRaisesRegexp(
Exception, 'ERROR: This script should be run from a clean branch.'):
common.verify_local_repo_is_clean()
def test_get_current_branch_name(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch test'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.get_current_branch_name(), 'test')
def test_get_current_release_version_number_with_non_hotfix_branch(self):
self.assertEqual(
common.get_current_release_version_number('release-1.2.3'), '1.2.3')
def test_get_current_release_version_number_with_hotfix_branch(self):
self.assertEqual(
common.get_current_release_version_number('release-1.2.3-hotfix-1'),
'1.2.3')
def test_get_current_release_version_number_with_maintenance_branch(self):
self.assertEqual(
common.get_current_release_version_number(
'release-maintenance-1.2.3'), '1.2.3')
def test_get_current_release_version_number_with_invalid_branch(self):
with self.assertRaisesRegexp(
Exception, 'Invalid branch name: invalid-branch.'):
common.get_current_release_version_number('invalid-branch')
def test_is_current_branch_a_hotfix_branch_with_non_hotfix_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-1.2.3'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_hotfix_branch(), False)
def test_is_current_branch_a_hotfix_branch_with_hotfix_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-1.2.3-hotfix-1'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_hotfix_branch(), True)
def test_is_current_branch_a_release_branch_with_release_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-1.2.3'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_release_branch(), True)
def test_is_current_branch_a_release_branch_with_hotfix_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-1.2.3-hotfix-1'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_release_branch(), True)
def test_is_current_branch_a_release_branch_with_maintenance_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-maintenance-1.2.3'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_release_branch(), True)
def test_is_current_branch_a_release_branch_with_non_release_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch test'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_release_branch(), False)
def test_is_current_branch_a_test_branch_with_test_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch test-common'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_test_branch(), True)
def test_is_current_branch_a_test_branch_with_non_test_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch invalid-test'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_test_branch(), False)
def test_verify_current_branch_name_with_correct_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch test'
with self.swap(
subprocess, 'check_output', mock_check_output):
common.verify_current_branch_name('test')
def test_verify_current_branch_name_with_incorrect_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch invalid'
check_output_swap = self.swap(
subprocess, 'check_output', mock_check_output)
with check_output_swap, self.assertRaisesRegexp(
Exception,
'ERROR: This script can only be run from the "test" branch.'):
common.verify_current_branch_name('test')
def test_ensure_release_scripts_folder_exists_with_invalid_access(self):
process = subprocess.Popen(['test'], stdout=subprocess.PIPE)
def mock_isdir(unused_dirpath):
return False
def mock_chdir(unused_dirpath):
pass
def mock_popen(unused_cmd, stdin, stdout, stderr): # pylint: disable=unused-argument
return process
def mock_communicate(unused_self):
return ('Output', 'Invalid')
isdir_swap = self.swap(os.path, 'isdir', mock_isdir)
chdir_swap = self.swap(os, 'chdir', mock_chdir)
popen_swap = self.swap(subprocess, 'Popen', mock_popen)
communicate_swap = self.swap(
subprocess.Popen, 'communicate', mock_communicate)
with isdir_swap, chdir_swap, popen_swap, communicate_swap:
with self.assertRaisesRegexp(
Exception, (
'You need SSH access to GitHub. See the '
'"Check your SSH access" section here and follow the '
'instructions: '
'https://help.github.com/articles/'
'error-repository-not-found/#check-your-ssh-access')):
common.ensure_release_scripts_folder_exists_and_is_up_to_date()
def test_ensure_release_scripts_folder_exists_with_valid_access(self):
process = subprocess.Popen(['test'], stdout=subprocess.PIPE)
def mock_isdir(unused_dirpath):
return False
def mock_chdir(unused_dirpath):
pass
def mock_popen(unused_cmd, stdin, stdout, stderr): # pylint: disable=unused-argument
return process
def mock_communicate(unused_self):
return ('Output', 'You\'ve successfully authenticated!')
def mock_check_call(unused_cmd_tokens):
pass
def mock_verify_local_repo_is_clean():
pass
def mock_verify_current_branch_name(unused_branch_name):
pass
def mock_get_remote_alias(unused_url):
return 'remote'
def mock_ask_user_to_confirm(unused_msg):
pass
isdir_swap = self.swap(os.path, 'isdir', mock_isdir)
chdir_swap = self.swap(os, 'chdir', mock_chdir)
popen_swap = self.swap(subprocess, 'Popen', mock_popen)
communicate_swap = self.swap(
subprocess.Popen, 'communicate', mock_communicate)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
verify_local_repo_swap = self.swap(
common, 'verify_local_repo_is_clean',
mock_verify_local_repo_is_clean)
verify_current_branch_name_swap = self.swap(
common, 'verify_current_branch_name',
mock_verify_current_branch_name)
get_remote_alias_swap = self.swap(
common, 'get_remote_alias', mock_get_remote_alias)
ask_user_swap = self.swap(
common, 'ask_user_to_confirm', mock_ask_user_to_confirm)
with isdir_swap, chdir_swap, popen_swap, communicate_swap:
with check_call_swap, verify_local_repo_swap, ask_user_swap:
with verify_current_branch_name_swap, get_remote_alias_swap:
(
common
.ensure_release_scripts_folder_exists_and_is_up_to_date(
))
def test_is_port_open(self):
self.assertFalse(common.is_port_open(4444))
handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(('', 4444), handler)
self.assertTrue(common.is_port_open(4444))
httpd.server_close()
def test_permissions_of_file(self):
root_temp_dir = tempfile.mkdtemp()
temp_dirpath = tempfile.mkdtemp(dir=root_temp_dir)
temp_file = tempfile.NamedTemporaryFile(dir=temp_dirpath)
temp_file.name = 'temp_file'
temp_file_path = os.path.join(temp_dirpath, 'temp_file')
with python_utils.open_file(temp_file_path, 'w') as f:
f.write('content')
common.recursive_chown(root_temp_dir, os.getuid(), -1)
common.recursive_chmod(root_temp_dir, 0o744)
for root, directories, filenames in os.walk(root_temp_dir):
for directory in directories:
self.assertEqual(
oct(stat.S_IMODE(
os.stat(os.path.join(root, directory)).st_mode)),
'0744')
self.assertEqual(
os.stat(os.path.join(root, directory)).st_uid, os.getuid())
for filename in filenames:
self.assertEqual(
oct(stat.S_IMODE(
os.stat(os.path.join(root, filename)).st_mode)), '0744')
self.assertEqual(
os.stat(os.path.join(root, filename)).st_uid, os.getuid())
shutil.rmtree(root_temp_dir)
def test_print_each_string_after_two_new_lines(self):
@contextlib.contextmanager
def _redirect_stdout(new_target):
"""Redirect stdout to the new target.
Args:
new_target: TextIOWrapper. The new target to which stdout is
redirected.
Yields:
TextIOWrapper. The new target.
"""
old_target = sys.stdout
sys.stdout = new_target
try:
yield new_target
finally:
sys.stdout = old_target
target_stdout = python_utils.string_io()
with _redirect_stdout(target_stdout):
common.print_each_string_after_two_new_lines([
'These', 'are', 'sample', 'strings.'])
self.assertEqual(
target_stdout.getvalue(), 'These\n\nare\n\nsample\n\nstrings.\n\n')
def test_install_npm_library(self):
def _mock_subprocess_check_call(unused_command):
"""Mocks subprocess.check_call() to create a temporary file instead
of the actual npm library.
"""
temp_file = tempfile.NamedTemporaryFile()
temp_file.name = 'temp_file'
with python_utils.open_file('temp_file', 'w') as f:
f.write('content')
self.assertTrue(os.path.exists('temp_file'))
temp_file.close()
self.assertFalse(os.path.exists('temp_file'))
with self.swap(subprocess, 'check_call', _mock_subprocess_check_call):
common.install_npm_library('library_name', 'version', 'path')
def test_ask_user_to_confirm(self):
def mock_input():
return 'Y'
with self.swap(python_utils, 'INPUT', mock_input):
common.ask_user_to_confirm('Testing')
def test_get_personal_access_token_with_valid_token(self):
def mock_getpass(prompt): # pylint: disable=unused-argument
return 'token'
with self.swap(getpass, 'getpass', mock_getpass):
self.assertEqual(common.get_personal_access_token(), 'token')
def test_get_personal_access_token_with_token_as_none(self):
def mock_getpass(prompt): # pylint: disable=unused-argument
return None
getpass_swap = self.swap(getpass, 'getpass', mock_getpass)
with getpass_swap, self.assertRaisesRegexp(
Exception,
'No personal access token provided, please set up a personal '
'access token at https://github.com/settings/tokens and re-run '
'the script'):
common.get_personal_access_token()
def test_closed_blocking_bugs_milestone_results_in_exception(self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
def mock_get_milestone(unused_self, number): # pylint: disable=unused-argument
return github.Milestone.Milestone(
requester='', headers='',
attributes={'state': 'closed'}, completed='')
get_milestone_swap = self.swap(
github.Repository.Repository, 'get_milestone', mock_get_milestone)
with get_milestone_swap, self.assertRaisesRegexp(
Exception, 'The blocking bug milestone is closed.'):
common.check_blocking_bug_issue_count(mock_repo)
def test_non_zero_blocking_bug_issue_count_results_in_exception(self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
def mock_open_tab(unused_url):
pass
def mock_get_milestone(unused_self, number): # pylint: disable=unused-argument
return github.Milestone.Milestone(
requester='', headers='',
attributes={'open_issues': 10, 'state': 'open'}, completed='')
get_milestone_swap = self.swap(
github.Repository.Repository, 'get_milestone', mock_get_milestone)
open_tab_swap = self.swap(
common, 'open_new_tab_in_browser_if_possible', mock_open_tab)
with get_milestone_swap, open_tab_swap, self.assertRaisesRegexp(
Exception, (
'There are 10 unresolved blocking bugs. Please '
'ensure that they are resolved before release '
'summary generation.')):
common.check_blocking_bug_issue_count(mock_repo)
def test_zero_blocking_bug_issue_count_results_in_no_exception(self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
def mock_get_milestone(unused_self, number): # pylint: disable=unused-argument
return github.Milestone.Milestone(
requester='', headers='',
attributes={'open_issues': 0, 'state': 'open'}, completed='')
with self.swap(
github.Repository.Repository, 'get_milestone', mock_get_milestone):
common.check_blocking_bug_issue_count(mock_repo)
def test_check_prs_for_current_release_are_released_with_no_unreleased_prs(
self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
pull1 = github.PullRequest.PullRequest(
requester='', headers='',
attributes={
'title': 'PR1', 'number': 1, 'labels': [
{'name': release_constants.LABEL_FOR_RELEASED_PRS},
{'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS}]},
completed='')
pull2 = github.PullRequest.PullRequest(
requester='', headers='',
attributes={
'title': 'PR2', 'number': 2, 'labels': [
{'name': release_constants.LABEL_FOR_RELEASED_PRS},
{'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS}]},
completed='')
label = github.Label.Label(
requester='', headers='',
attributes={
'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS},
completed='')
def mock_get_issues(unused_self, state, labels): # pylint: disable=unused-argument
return [pull1, pull2]
def mock_get_label(unused_self, unused_name):
return [label]
get_issues_swap = self.swap(
github.Repository.Repository, 'get_issues', mock_get_issues)
get_label_swap = self.swap(
github.Repository.Repository, 'get_label', mock_get_label)
with get_issues_swap, get_label_swap:
common.check_prs_for_current_release_are_released(mock_repo)
def test_check_prs_for_current_release_are_released_with_unreleased_prs(
self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
def mock_open_tab(unused_url):
pass
pull1 = github.PullRequest.PullRequest(
requester='', headers='',
attributes={
'title': 'PR1', 'number': 1, 'labels': [
{'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS}]},
completed='')
pull2 = github.PullRequest.PullRequest(
requester='', headers='',
attributes={
'title': 'PR2', 'number': 2, 'labels': [
{'name': release_constants.LABEL_FOR_RELEASED_PRS},
{'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS}]},
completed='')
label = github.Label.Label(
requester='', headers='',
attributes={
'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS},
completed='')
def mock_get_issues(unused_self, state, labels): # pylint: disable=unused-argument
return [pull1, pull2]
def mock_get_label(unused_self, unused_name):
return [label]
get_issues_swap = self.swap(
github.Repository.Repository, 'get_issues', mock_get_issues)
get_label_swap = self.swap(
github.Repository.Repository, 'get_label', mock_get_label)
open_tab_swap = self.swap(
common, 'open_new_tab_in_browser_if_possible', mock_open_tab)
with get_issues_swap, get_label_swap, open_tab_swap:
with self.assertRaisesRegexp(
Exception, (
'There are PRs for current release which do not '
'have a \'%s\' label. Please ensure that '
'they are released before release summary '
'generation.') % (
release_constants.LABEL_FOR_RELEASED_PRS)):
common.check_prs_for_current_release_are_released(mock_repo)
def test_kill_processes_based_on_regex(self):
killed = []
def mock_kill(p):
killed.append(MockPsutilProcess.cmdlines[p.index])
def mock_cmdlines(p):
return MockPsutilProcess.cmdlines[p.index]
def mock_process_iter():
return [MockPsutilProcess(0), MockPsutilProcess(1)]
process_iter_swap = self.swap_with_checks(
psutil, 'process_iter', mock_process_iter)
kill_swap = self.swap(MockPsutilProcess, 'kill', mock_kill)
cmdlines_swap = self.swap(MockPsutilProcess, 'cmdline', mock_cmdlines)
with process_iter_swap, kill_swap, cmdlines_swap:
common.kill_processes_based_on_regex(r'.*dev_appserver\.py')
self.assertEqual(killed, [MockPsutilProcess.cmdlines[0]])
def test_kill_processes_based_on_regex_when_access_denied(self):
killed = []
def mock_kill(p):
killed.append(MockPsutilProcess.cmdlines[p.index])
def mock_cmdlines(p):
if p.index == 0:
raise psutil.AccessDenied()
return MockPsutilProcess.cmdlines[p.index]
def mock_process_iter():
return [MockPsutilProcess(0), MockPsutilProcess(1)]
process_iter_swap = self.swap_with_checks(
psutil, 'process_iter', mock_process_iter)
kill_swap = self.swap(MockPsutilProcess, 'kill', mock_kill)
cmdlines_swap = self.swap(MockPsutilProcess, 'cmdline', mock_cmdlines)
with process_iter_swap, kill_swap, cmdlines_swap:
common.kill_processes_based_on_regex(r'.*dev_appserver\.py')
self.assertEqual(killed, [])
def test_kill_process_when_psutil_not_in_path(self):
path_swap = self.swap(sys, 'path', [])
def mock_process_iter():
return []
process_iter_swap = self.swap(psutil, 'process_iter', mock_process_iter)
with path_swap, process_iter_swap:
common.kill_processes_based_on_regex('')
def test_inplace_replace_file(self):
origin_file = os.path.join(
'core', 'tests', 'data', 'inplace_replace_test.json')
backup_file = os.path.join(
'core', 'tests', 'data', 'inplace_replace_test.json.bak')
expected_lines = [
'{\n',
' "RANDMON1" : "randomValue1",\n',
' "312RANDOM" : "ValueRanDom2",\n',
' "DEV_MODE": true,\n',
' "RAN213DOM" : "raNdoVaLue3"\n',
'}\n'
]
def mock_remove(unused_file):
return
remove_swap = self.swap_with_checks(
os, 'remove', mock_remove, expected_args=[(backup_file,)]
)
with remove_swap:
common.inplace_replace_file(
origin_file, '"DEV_MODE": .*', '"DEV_MODE": true,')
with python_utils.open_file(origin_file, 'r') as f:
self.assertEqual(expected_lines, f.readlines())
# Revert the file.
os.remove(origin_file)
shutil.move(backup_file, origin_file)
def test_inplace_replace_file_with_exception_raised(self):
origin_file = os.path.join(
'core', 'tests', 'data', 'inplace_replace_test.json')
backup_file = os.path.join(
'core', 'tests', 'data', 'inplace_replace_test.json.bak')
with python_utils.open_file(origin_file, 'r') as f:
origin_content = f.readlines()
def mock_compile(unused_arg):
raise ValueError('Exception raised from compile()')
compile_swap = self.swap_with_checks(re, 'compile', mock_compile)
with self.assertRaisesRegexp(
ValueError, r'Exception raised from compile\(\)'), compile_swap:
common.inplace_replace_file(
origin_file, '"DEV_MODE": .*', '"DEV_MODE": true,')
self.assertFalse(os.path.isfile(backup_file))
with python_utils.open_file(origin_file, 'r') as f:
new_content = f.readlines()
self.assertEqual(origin_content, new_content)
def test_convert_to_posixpath_on_windows(self):
def mock_is_windows():
return True
is_windows_swap = self.swap(common, 'is_windows_os', mock_is_windows)
original_filepath = 'c:\\path\\to\\a\\file.js'
with is_windows_swap:
actual_file_path = common.convert_to_posixpath(original_filepath)
self.assertEqual(actual_file_path, 'c:/path/to/a/file.js')
def test_convert_to_posixpath_on_platform_other_than_windows(self):
def mock_is_windows():
return False
is_windows_swap = self.swap(common, 'is_windows_os', mock_is_windows)
original_filepath = 'c:\\path\\to\\a\\file.js'
with is_windows_swap:
actual_file_path = common.convert_to_posixpath(original_filepath)
self.assertEqual(actual_file_path, original_filepath)
def test_create_readme(self):
try:
os.makedirs('readme_test_dir')
common.create_readme('readme_test_dir', 'Testing readme.')
with python_utils.open_file('readme_test_dir/README.md', 'r') as f:
self.assertEqual(f.read(), 'Testing readme.')
finally:
if os.path.exists('readme_test_dir'):
shutil.rmtree('readme_test_dir')
def test_windows_os_throws_exception_when_starting_redis_server(self):
def mock_is_windows_os():
return True
windows_not_supported_exception = self.assertRaisesRegexp(
Exception,
'The redis command line interface is not installed because your '
'machine is on the Windows operating system. The redis server '
'cannot start.')
swap_os_check = self.swap(common, 'is_windows_os', mock_is_windows_os)
with swap_os_check, windows_not_supported_exception:
common.start_redis_server()
def test_windows_os_throws_exception_when_stopping_redis_server(self):
def mock_is_windows_os():
return True
windows_not_supported_exception = self.assertRaisesRegexp(
Exception,
'The redis command line interface is not installed because your '
'machine is on the Windows operating system. There is no redis '
'server to shutdown.')
swap_os_check = self.swap(common, 'is_windows_os', mock_is_windows_os)
with swap_os_check, windows_not_supported_exception:
common.stop_redis_server()
def test_start_and_stop_server_calls_are_called(self):
# Test that starting the server calls subprocess.call().
check_function_calls = {
'subprocess_call_is_called': False
}
expected_check_function_calls = {
'subprocess_call_is_called': True
}
def mock_call(unused_cmd_tokens, *args, **kwargs): # pylint: disable=unused-argument
check_function_calls['subprocess_call_is_called'] = True
class Ret(python_utils.OBJECT):
"""Return object with required attributes."""
def __init__(self):
self.returncode = 0
def communicate(self):
"""Return required method."""
return '', ''
return Ret()
def mock_wait_for_port_to_be_open(port): # pylint: disable=unused-argument
return
swap_call = self.swap(subprocess, 'call', mock_call)
swap_wait_for_port_to_be_open = self.swap(
common, 'wait_for_port_to_be_open',
mock_wait_for_port_to_be_open)
with swap_call, swap_wait_for_port_to_be_open:
common.start_redis_server()
self.assertEqual(check_function_calls, expected_check_function_calls)
# Test that stopping the server calls subprocess.call().
check_function_calls = {
'subprocess_call_is_called': False
}
expected_check_function_calls = {
'subprocess_call_is_called': True
}
swap_call = self.swap(subprocess, 'call', mock_call)
with swap_call:
common.stop_redis_server()
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_start_server_removes_redis_dump(self):
check_function_calls = {
'os_remove_is_called': False
}
def mock_os_remove_file(file_path): # pylint: disable=unused-argument
check_function_calls['os_remove_is_called'] = True
def mock_os_path_exists(file_path): # pylint: disable=unused-argument
return True
def mock_call(unused_cmd_tokens, *args, **kwargs): # pylint: disable=unused-argument
class Ret(python_utils.OBJECT):
"""Return object with required attributes."""
def __init__(self):
self.returncode = 0
def communicate(self):
"""Return required method."""
return '', ''
return Ret()
def mock_wait_for_port_to_be_open(port): # pylint: disable=unused-argument
return
swap_call = self.swap(subprocess, 'call', mock_call)
swap_wait_for_port_to_be_open = self.swap(
common, 'wait_for_port_to_be_open',
mock_wait_for_port_to_be_open)
swap_os_remove = self.swap(os, 'remove', mock_os_remove_file)
swap_os_path_exists = self.swap(os.path, 'exists', mock_os_path_exists)
with swap_call, swap_wait_for_port_to_be_open, swap_os_remove, (
swap_os_path_exists):
common.start_redis_server()
self.assertTrue(check_function_calls['os_remove_is_called'])
|
|
#!/usr/bin/env python
"""This file controls the central function of EVOLVE, the calculation of the log-
likelihood of an alignment given a phylogenetic tree and substitution model.
The likelihood calculation is done according to Felsenstein's 1981 pruning
algorithm. This file contains a Python implementation of that
algorithm and an interface to a more computationally efficient Pyrex
implementation. The two versions are maintained for the purpose of cross-
validating accuracy. The calculations can be performed for trees that have polytomies
in addition to binary trees.
"""
import numpy
Float = numpy.core.numerictypes.sctype2char(float)
from cogent.recalculation.definition import CalculationDefn, _FuncDefn, \
CalcDefn, ProbabilityParamDefn, NonParamDefn, SumDefn, CallDefn, \
ParallelSumDefn
from cogent.evolve.likelihood_tree import LikelihoodTreeEdge
from cogent.evolve.simulate import argpick
from cogent.maths.markov import SiteClassTransitionMatrix
__author__ = "Peter Maxwell"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Peter Maxwell"
__email__ = "[email protected]"
__status__ = "Production"
class _PartialLikelihoodDefn(CalculationDefn):
def setup(self, edge_name):
self.edge_name = edge_name
class LeafPartialLikelihoodDefn(_PartialLikelihoodDefn):
name = "sequence"
def calc(self, lh_tree):
lh_leaf = lh_tree.getEdge(self.edge_name)
return lh_leaf.input_likelihoods
class PartialLikelihoodProductDefn(_PartialLikelihoodDefn):
name = "plh"
recycling = True
def calc(self, recycled_result, lh_edge, *child_likelihoods):
if recycled_result is None:
recycled_result = lh_edge.makePartialLikelihoodsArray()
return lh_edge.sumInputLikelihoodsR(recycled_result, *child_likelihoods)
class PartialLikelihoodProductDefnFixedMotif(PartialLikelihoodProductDefn):
def calc(self, recycled_result, fixed_motif, lh_edge, *child_likelihoods):
if recycled_result is None:
recycled_result = lh_edge.makePartialLikelihoodsArray()
result = lh_edge.sumInputLikelihoodsR(
recycled_result, *child_likelihoods)
if fixed_motif not in [None, -1]:
for motif in range(result.shape[-1]):
if motif != fixed_motif:
result[:, motif] = 0.0
return result
class LhtEdgeLookupDefn(CalculationDefn):
name = 'col_index'
def setup(self, edge_name):
self.edge_name = edge_name
# so that it can be found by reconstructAncestralSeqs etc:
if edge_name == 'root':
self.name = 'root'
def calc(self, lht):
return lht.getEdge(self.edge_name)
def makePartialLikelihoodDefns(edge, lht, psubs, fixed_motifs):
kw = {'edge_name':edge.Name}
if edge.istip():
plh = LeafPartialLikelihoodDefn(lht, **kw)
else:
lht_edge = LhtEdgeLookupDefn(lht, **kw)
children = []
for child in edge.Children:
child_plh = makePartialLikelihoodDefns(child, lht, psubs,
fixed_motifs)
psub = psubs.selectFromDimension('edge', child.Name)
child_plh = CalcDefn(numpy.inner)(child_plh, psub)
children.append(child_plh)
if fixed_motifs:
fixed_motif = fixed_motifs.selectFromDimension('edge', edge.Name)
plh = PartialLikelihoodProductDefnFixedMotif(
fixed_motif, lht_edge, *children, **kw)
else:
plh = PartialLikelihoodProductDefn(lht, *children, **kw)
return plh
def recursive_lht_build(edge, leaves):
if edge.istip():
lhe = leaves[edge.Name]
else:
lht_children = []
for child in edge.Children:
lht = recursive_lht_build(child, leaves)
lht_children.append(lht)
lhe = LikelihoodTreeEdge(lht_children, edge_name=edge.Name)
return lhe
class LikelihoodTreeDefn(CalculationDefn):
name = 'lht'
def setup(self, tree):
self.tree = tree
def calc(self, leaves):
return recursive_lht_build(self.tree, leaves)
class LikelihoodTreeAlignmentSplitterDefn(CalculationDefn):
name = 'local_lht'
def calc(self, parallel_context, lht):
return lht.parallelShare(parallel_context)
def makeTotalLogLikelihoodDefn(tree, leaves, psubs, mprobs, bprobs, bin_names,
locus_names, sites_independent):
fixed_motifs = NonParamDefn('fixed_motif', ['edge'])
lht = LikelihoodTreeDefn(leaves, tree=tree)
# Split up the alignment columns between the available CPUs.
parallel_context = NonParamDefn('parallel_context')
lht = LikelihoodTreeAlignmentSplitterDefn(parallel_context, lht)
plh = makePartialLikelihoodDefns(tree, lht, psubs, fixed_motifs)
# After the root partial likelihoods have been calculated it remains to
    # sum over the motifs, local sites, other sites (i.e. CPUs), bins and loci.
# The motifs are always done first, but after that it gets complicated.
# If a bin HMM is being used then the sites from the different CPUs must
# be interleaved first, otherwise summing over the CPUs is done last to
    # minimise inter-CPU communication.
root_mprobs = mprobs.selectFromDimension('edge', 'root')
lh = CalcDefn(numpy.inner, name='lh')(plh, root_mprobs)
if len(bin_names) > 1:
if sites_independent:
site_pattern = CalcDefn(BinnedSiteDistribution, name='bdist')(
bprobs)
else:
parallel_context = None # hmm does the gathering over CPUs
switch = ProbabilityParamDefn('bin_switch', dimensions=['locus'])
site_pattern = CalcDefn(PatchSiteDistribution, name='bdist')(
switch, bprobs)
blh = CallDefn(site_pattern, lht, name='bindex')
tll = CallDefn(blh, *lh.acrossDimension('bin', bin_names),
**dict(name='tll'))
else:
lh = lh.selectFromDimension('bin', bin_names[0])
tll = CalcDefn(log_sum_across_sites, name='logsum')(lht, lh)
if len(locus_names) > 1 or parallel_context is None:
# "or parallel_context is None" only because SelectFromDimension
# currently has no .makeParamController() method.
tll = SumDefn(*tll.acrossDimension('locus', locus_names))
else:
tll = tll.selectFromDimension('locus', locus_names[0])
if parallel_context is not None:
tll = ParallelSumDefn(parallel_context, tll)
return tll
def log_sum_across_sites(root, root_lh):
return root.getLogSumAcrossSites(root_lh)
class BinnedSiteDistribution(object):
def __init__(self, bprobs):
self.bprobs = bprobs
def getWeightedSumLh(self, lhs):
result = numpy.zeros(lhs[0].shape, lhs[0].dtype.char)
temp = numpy.empty(result.shape, result.dtype.char)
for (bprob, lh) in zip(self.bprobs, lhs):
temp[:] = lh
temp *= bprob
result += temp
return result
def __call__(self, root):
return BinnedLikelihood(self, root)
def emit(self, length, random_series):
result = numpy.zeros([length], int)
for i in range(length):
result[i] = argpick(self.bprobs, random_series)
return result
class PatchSiteDistribution(object):
def __init__(self, switch, bprobs):
half = len(bprobs) // 2
self.alloc = [0] * half + [1] * (len(bprobs)-half)
pprobs = numpy.zeros([max(self.alloc)+1], Float)
for (b,p) in zip(self.alloc, bprobs):
pprobs[b] += p
self.bprobs = [p/pprobs[self.alloc[i]] for (i,p) in enumerate(bprobs)]
self.transition_matrix = SiteClassTransitionMatrix(switch, pprobs)
def getWeightedSumLhs(self, lhs):
result = numpy.zeros((2,)+lhs[0].shape, lhs[0].dtype.char)
temp = numpy.empty(lhs[0].shape, result.dtype.char)
for (patch, weight, lh) in zip(self.alloc, self.bprobs, lhs):
temp[:] = lh
temp *= weight
result[patch] += temp
return result
def __call__(self, root):
return SiteHmm(self, root)
def emit(self, length, random_series):
bprobs = [[p for (patch,p) in zip(self.alloc, self.bprobs) if patch==a]
for a in [0,1]]
source = self.transition_matrix.emit(random_series)
result = numpy.zeros([length], int)
for i in range(length):
patch = source.next() - 1
result[i] = argpick(bprobs[patch], random_series)
return result
class BinnedLikelihood(object):
def __init__(self, distrib, root):
self.distrib = distrib
self.root = root
def __call__(self, *lhs):
result = self.distrib.getWeightedSumLh(lhs)
return self.root.getLogSumAcrossSites(result)
def getPosteriorProbs(self, *lhs):
# posterior bin probs, not motif probs
assert len(lhs) == len(self.distrib.bprobs)
result = numpy.array(
[b*self.root.getFullLengthLikelihoods(p)
for (b,p) in zip(self.distrib.bprobs, lhs)])
result /= result.sum(axis=0)
return result
class SiteHmm(object):
def __init__(self, distrib, root):
self.root = root
self.distrib = distrib
def __call__(self, *lhs):
plhs = self.distrib.getWeightedSumLhs(lhs)
plhs = numpy.ascontiguousarray(numpy.transpose(plhs))
matrix = self.distrib.transition_matrix
return self.root.logDotReduce(
matrix.StationaryProbs, matrix.Matrix, plhs)
def getPosteriorProbs(self, *lhs):
plhs = []
for lh in self.distrib.getWeightedSumLhs(lhs):
plh = self.root.getFullLengthLikelihoods(lh)
plhs.append(plh)
plhs = numpy.transpose(plhs)
pprobs = self.distrib.transition_matrix.getPosteriorProbs(plhs)
pprobs = numpy.array(numpy.transpose(pprobs))
lhs = numpy.array(lhs)
blhs = lhs / numpy.sum(lhs, axis=0)
blhs = numpy.array(
[b * self.root.getFullLengthLikelihoods(p)
for (b,p) in zip(self.distrib.bprobs, blhs)])
binsum = numpy.zeros(pprobs.shape, Float)
for (patch, data) in zip(self.distrib.alloc, blhs):
binsum[patch] += data
for (patch, data) in zip(self.distrib.alloc, blhs):
data *= pprobs[patch] / binsum[patch]
return blhs
|
|
"""
plot_radar.py
Class used to make the radar Display.
"""
from __future__ import print_function
# Load the needed packages
import numpy as np
import os
import pyart
from matplotlib.backends.qt_compat import is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
from matplotlib.colors import Normalize as mlabNormalize
from matplotlib.colorbar import ColorbarBase as mlabColorbarBase
from matplotlib.pyplot import cm
from ..core import (Variable, Component, common, VariableChoose, QtCore,
QtGui, QtWidgets, log)
from ..core.points import Points
# Save image file type and DPI (resolution)
IMAGE_EXT = 'png'
DPI = 200
# ========================================================================
class RadarDisplay(Component):
'''
Class to create a display plot, using a returned Radar structure
from the PyArt pyart.graph package.
'''
Vradar = None #: see :ref:`shared_variable`
Vfield = None #: see :ref:`shared_variable`
Vtilt = None #: see :ref:`shared_variable`
Vlimits = None #: see :ref:`shared_variable`
Vcolormap = None #: see :ref:`shared_variable`
Vgatefilter = None #: see :ref:`shared_variable`
VplotAxes = None #: see :ref:`shared_variable` (no internal use)
VpathInteriorFunc = None #: see :ref:`shared_variable` (no internal use)
VpyartDisplay = None #: see :ref:`shared_variable`
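    # Illustrative usage (sketch, not from the original source): the component
    # is normally created from the ARTview menus, but it can also be built
    # directly once a radar has been read, e.g.
    #
    #     import pyart
    #     from artview.core import Variable
    #     radar = pyart.io.read('some_radar_file.nc')   # hypothetical file
    #     display = RadarDisplay(Vradar=Variable(radar),
    #                            Vfield=Variable('reflectivity'),
    #                            Vtilt=Variable(0), name="RadarDisplay")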
@classmethod
def guiStart(self, parent=None):
'''Graphical interface for starting this class'''
args = _DisplayStart().startDisplay()
args['parent'] = parent
return self(**args), True
def __init__(self, Vradar=None, Vfield=None, Vtilt=None, Vlimits=None,
Vcolormap=None, Vgatefilter=None,
name="RadarDisplay", parent=None):
'''
Initialize the class to create display.
Parameters
----------
[Optional]
        Vradar : :py:class:`~artview.core.core.Variable` instance
            Radar signal variable. If None, a new one is started with None.
        Vfield : :py:class:`~artview.core.core.Variable` instance
            Field signal variable. If None, a new one is started with an
            empty string.
        Vtilt : :py:class:`~artview.core.core.Variable` instance
            Tilt signal variable. If None, a new one is started with 0.
        Vlimits : :py:class:`~artview.core.core.Variable` instance
            Limits signal variable.
            A value of None will instantiate a limits variable.
        Vcolormap : :py:class:`~artview.core.core.Variable` instance
            Colormap signal variable.
            A value of None will instantiate a colormap variable.
        Vgatefilter : :py:class:`~artview.core.core.Variable` instance
            Gatefilter signal variable.
            A value of None will instantiate an empty variable.
name : string
Display window name.
parent : PyQt instance
Parent instance to associate to Display window.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
'''
super(RadarDisplay, self).__init__(name=name, parent=parent)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
self.setMinimumSize(20,20)
#self.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
# Set up signal, so that DISPLAY can react to
# external (or internal) changes in radar, field,
# lims and tilt (expected to be Core.Variable instances)
# The capital V so people remember using ".value"
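        # For instance, calling self.Vtilt.change(2) later on emits the
        # 'ValueChanged' signal, which runs NewTilt here and in every other
        # component connected to the same Variable.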
if Vradar is None:
self.Vradar = Variable(None)
else:
self.Vradar = Vradar
if Vfield is None:
self.Vfield = Variable('')
else:
self.Vfield = Vfield
if Vtilt is None:
self.Vtilt = Variable(0)
else:
self.Vtilt = Vtilt
if Vlimits is None:
self.Vlimits = Variable(None)
else:
self.Vlimits = Vlimits
if Vcolormap is None:
self.Vcolormap = Variable(None)
else:
self.Vcolormap = Vcolormap
if Vgatefilter is None:
self.Vgatefilter = Variable(None)
else:
self.Vgatefilter = Vgatefilter
self.VpathInteriorFunc = Variable(self.getPathInteriorValues)
self.VplotAxes = Variable(None)
self.VpyartDisplay = Variable(None)
self.sharedVariables = {"Vradar": self.NewRadar,
"Vfield": self.NewField,
"Vtilt": self.NewTilt,
"Vlimits": self.NewLims,
"Vcolormap": self.NewCmap,
"Vgatefilter": self.NewGatefilter,
"VpathInteriorFunc": None,
"VplotAxes": None,
"VpyartDisplay": self.NewDisplay}
# Connect the components
self.connectAllVariables()
self.plot_type = None
# Set plot title and colorbar units to defaults
self.title = self._get_default_title()
self.units = self._get_default_units()
# Set the default range rings
self.RngRingList = ["None", "10 km", "20 km", "30 km",
"50 km", "100 km"]
self.RngRing = False
# Find the PyArt colormap names
# self.cm_names = [m for m in cm.datad if not m.endswith("_r")]
self.cm_names = ["pyart_" + m for m in pyart.graph.cm.datad
if not m.endswith("_r")]
self.cm_names.sort()
# Create tool dictionary
self.tools = {}
# Create display image text dictionary
self.disp_text = {}
# Set up Default limits and cmap
if Vlimits is None:
self._set_default_limits(strong=False)
if Vcolormap is None:
self._set_default_cmap(strong=False)
# Create a figure for output
self._set_fig_ax()
# Launch the GUI interface
self.LaunchGUI()
# Initialize radar variable
self.NewRadar(None, True)
self.show()
def keyPressEvent(self, event):
'''Allow tilt adjustment via the Up-Down arrow keys.'''
if event.key() == QtCore.Qt.Key_Up:
self.TiltSelectCmd(self.Vtilt.value + 1)
elif event.key() == QtCore.Qt.Key_Down:
self.TiltSelectCmd(self.Vtilt.value - 1)
else:
super(RadarDisplay, self).keyPressEvent(event)
####################
# GUI methods #
####################
def LaunchGUI(self):
'''Launches a GUI interface.'''
# Create layout
self.layout = QtWidgets.QGridLayout()
self.layout.setSpacing(8)
# Create the widget
self.central_widget = QtWidgets.QWidget()
self.setCentralWidget(self.central_widget)
self._set_figure_canvas()
self.central_widget.setLayout(self.layout)
# Add buttons along display for user control
self.addButtons()
self.setUILayout()
# Set the status bar to display messages
self.statusbar = self.statusBar()
##################################
# User display interface methods #
##################################
def addButtons(self):
'''Add a series of buttons for user control over display.'''
# Create the Display controls
self._add_displayBoxUI()
# Create the Tilt controls
self._add_tiltBoxUI()
# Create the Field controls
self._add_fieldBoxUI()
# Create the Tools controls
self._add_toolsBoxUI()
# Create the Informational label at top
self._add_infolabel()
def setUILayout(self):
        '''Set up the button/display UI layout.'''
self.layout.addWidget(self.tiltBox, 0, 0)
self.layout.addWidget(self.fieldBox, 0, 1)
self.layout.addWidget(self.dispButton, 0, 2)
self.layout.addWidget(self.toolsButton, 0, 3)
self.layout.addWidget(self.infolabel, 0, 4)
#############################
# Functionality methods #
#############################
def _open_LimsDialog(self):
'''Open a dialog box to change display limits.'''
from .limits import limits_dialog
limits, cmap, aspect, change = limits_dialog(
self.Vlimits.value, self.Vcolormap.value, self.ax.get_aspect(),
self.name)
if aspect != self.ax.get_aspect():
self.ax.set_aspect(aspect)
if change == 1:
self.Vcolormap.change(cmap)
self.Vlimits.change(limits)
def _fillTiltBox(self):
'''Fill in the Tilt Window Box with current elevation angles.'''
self.tiltBox.clear()
self.tiltBox.addItem("Tilt Window")
# Loop through and create each tilt button
elevs = self.Vradar.value.fixed_angle['data'][:]
for i, ntilt in enumerate(self.rTilts):
btntxt = "%2.1f deg (Tilt %d)" % (elevs[i], i+1)
self.tiltBox.addItem(btntxt)
def _fillFieldBox(self):
'''Fill in the Field Window Box with current variable names.'''
self.fieldBox.clear()
self.fieldBox.addItem("Field Window")
# Loop through and create each field button
for field in self.fieldnames:
self.fieldBox.addItem(field)
def _tiltAction(self, text):
'''Define action for Tilt Button selection.'''
if text == "Tilt Window":
self._open_tiltbuttonwindow()
else:
ntilt = int(text.split("(Tilt ")[1][:-1])-1
self.TiltSelectCmd(ntilt)
def _fieldAction(self, text):
'''Define action for Field Button selection.'''
if text == "Field Window":
self._open_fieldbuttonwindow()
else:
self.FieldSelectCmd(str(text))
def _GateFilterToggleAction(self):
'''Define action for GateFilterToggle menu selection.'''
if self.gatefilterToggle.isChecked():
self.gatefilterToggle.setText("GateFilter On")
else:
self.gatefilterToggle.setText("GateFilter Off")
self._update_plot()
def _IgnoreEdgesToggleAction(self):
'''Define action for IgnoreEdgesToggle menu selection.'''
if self.ignoreEdgesToggle.isChecked():
self.ignoreEdges = False
else:
self.ignoreEdges = True
self._update_plot()
def _UseMapToggleAction(self):
        '''Define action for UseMapToggle menu selection.'''
self._check_file_type()
self._update_display()
def _title_input(self):
'''Retrieve new plot title.'''
val, entry = common.string_dialog_with_reset(
self.title, "Plot Title", "Title:", self._get_default_title())
if entry is True:
self.title = val
self._update_plot()
def _units_input(self):
'''Retrieve new plot units.'''
val, entry = common.string_dialog_with_reset(
self.units, "Plot Units", "Units:", self._get_default_units())
if entry is True:
self.units = val
self._update_plot()
def _add_ImageText(self):
'''Add a text box to display.'''
from .image_text import ImageTextBox
itext = ImageTextBox(self, parent=self.parent)
return itext
def _open_tiltbuttonwindow(self):
'''Open a TiltButtonWindow instance.'''
from .level import LevelButtonWindow
self.tiltbuttonwindow = LevelButtonWindow(
self.Vtilt, plot_type=self.plot_type, Vcontainer=self.Vradar,
name=self.name+" Tilt Selection", parent=self.parent)
def _open_fieldbuttonwindow(self):
'''Open a FieldButtonWindow instance.'''
from .field import FieldButtonWindow
self.fieldbuttonwindow = FieldButtonWindow(
self.Vradar, self.Vfield,
name=self.name+" Field Selection", parent=self.parent)
def _add_RngRing_to_button(self):
'''Add a menu to display range rings on plot.'''
for RngRing in self.RngRingList:
RingAction = self.dispRngRingmenu.addAction(RngRing)
RingAction.setStatusTip("Apply Range Rings every %s" % RngRing)
RingAction.triggered.connect(
lambda check, RngRing=RngRing: self.RngRingSelectCmd(RngRing))
self.dispRngRing.setMenu(self.dispRngRingmenu)
def _add_cmaps_to_button(self):
'''Add a menu to change colormap used for plot.'''
for cm_name in self.cm_names:
cmapAction = self.dispCmapmenu.addAction(cm_name)
cmapAction.setStatusTip("Use the %s colormap" % cm_name)
cmapAction.triggered.connect(
lambda check, cm_name=cm_name: self.cmapSelectCmd(cm_name))
self.dispCmap.setMenu(self.dispCmapmenu)
def _add_displayBoxUI(self):
'''Create the Display Options Button menu.'''
self.dispButton = QtWidgets.QPushButton("Display Options")
self.dispButton.setToolTip("Adjust display properties")
self.dispButton.setFocusPolicy(QtCore.Qt.NoFocus)
dispmenu = QtWidgets.QMenu(self)
dispLimits = dispmenu.addAction("Adjust Display Limits")
dispLimits.setToolTip("Set data, X, and Y range limits")
self.colormapToggle = QtWidgets.QAction(
'Colormap', dispmenu, checkable=True,
triggered=self._update_plot)
dispmenu.addAction(self.colormapToggle)
self.colormapToggle.setChecked(True)
self.gatefilterToggle = QtWidgets.QAction(
'GateFilter On', dispmenu, checkable=True,
triggered=self._GateFilterToggleAction)
dispmenu.addAction(self.gatefilterToggle)
self.gatefilterToggle.setChecked(True)
self.ignoreEdgesToggle = QtWidgets.QAction(
'Ignore Edges', dispmenu, checkable=True,
triggered=self._IgnoreEdgesToggleAction)
dispmenu.addAction(self.ignoreEdgesToggle)
self.ignoreEdgesToggle.setChecked(False)
self.useMapToggle = QtWidgets.QAction(
'Use MapDisplay', dispmenu, checkable=True,
triggered=self._UseMapToggleAction)
dispmenu.addAction(self.useMapToggle)
self.useMapToggle.setChecked(False)
dispTitle = dispmenu.addAction("Change Title")
dispTitle.setToolTip("Change plot title")
dispUnit = dispmenu.addAction("Change Units")
dispUnit.setToolTip("Change units string")
self.dispRngRing = dispmenu.addAction("Add Range Rings")
self.dispRngRingmenu = QtWidgets.QMenu("Add Range Rings")
self.dispRngRingmenu.setFocusPolicy(QtCore.Qt.NoFocus)
self.dispCmap = dispmenu.addAction("Change Colormap")
self.dispCmapmenu = QtWidgets.QMenu("Change Cmap")
self.dispCmapmenu.setFocusPolicy(QtCore.Qt.NoFocus)
changeAxesPosition = dispmenu.addAction("Change Axes Position")
self.dispImageText = dispmenu.addAction("Add Text to Image")
self.dispImageText.setToolTip("Add Text Box to Image")
dispQuickSave = dispmenu.addAction("Quick Save Image")
dispQuickSave.setShortcut("Ctrl+D")
dispQuickSave.setToolTip(
"Save Image to local directory with default name")
dispSaveFile = dispmenu.addAction("Save Image")
dispSaveFile.setShortcut("Ctrl+S")
dispSaveFile.setStatusTip("Save Image using dialog")
dispLimits.triggered.connect(self._open_LimsDialog)
dispTitle.triggered.connect(self._title_input)
dispUnit.triggered.connect(self._units_input)
changeAxesPosition.triggered.connect(self._change_axes_position)
self.dispImageText.triggered.connect(self._add_ImageText)
dispQuickSave.triggered.connect(self._quick_savefile)
dispSaveFile.triggered.connect(self._savefile)
self._add_RngRing_to_button()
self._add_cmaps_to_button()
self.dispButton.setMenu(dispmenu)
def _add_tiltBoxUI(self):
'''Create the Tilt Selection ComboBox.'''
self.tiltBox = QtWidgets.QComboBox()
self.tiltBox.setFocusPolicy(QtCore.Qt.NoFocus)
self.tiltBox.setToolTip("Select tilt elevation angle to display.\n"
"'Tilt Window' will launch popup.\n"
"Up/Down arrow keys Increase/Decrease tilt.")
self.tiltBox.activated[str].connect(self._tiltAction)
def _add_fieldBoxUI(self):
'''Create the Field Selection ComboBox.'''
self.fieldBox = QtWidgets.QComboBox()
self.fieldBox.setFocusPolicy(QtCore.Qt.NoFocus)
self.fieldBox.setToolTip("Select variable/field in data file.\n"
"'Field Window' will launch popup.\n")
self.fieldBox.activated[str].connect(self._fieldAction)
def _add_toolsBoxUI(self):
'''Create the Tools Button menu.'''
self.toolsButton = QtWidgets.QPushButton("Toolbox")
self.toolsButton.setFocusPolicy(QtCore.Qt.NoFocus)
self.toolsButton.setToolTip("Choose a tool to apply")
toolmenu = QtWidgets.QMenu(self)
toolZoomPan = toolmenu.addAction("Zoom/Pan")
toolValueClick = toolmenu.addAction("Click for Value")
toolReset = toolmenu.addAction("Reset Tools")
toolDefault = toolmenu.addAction("Reset File Defaults")
toolZoomPan.triggered.connect(self.toolZoomPanCmd)
toolValueClick.triggered.connect(self.toolValueClickCmd)
toolReset.triggered.connect(self.toolResetCmd)
toolDefault.triggered.connect(self.toolDefaultCmd)
self.toolmenu = toolmenu
self.toolsButton.setMenu(toolmenu)
def add_mode(self, mode, label):
""" Add a tool entry with given label. Selecting that tool, execute
mode passing this component shared variables."""
def call_mode():
mode(self.get_sharedVariables())
action = self.toolmenu.addAction(label)
action.triggered.connect(call_mode)
def _add_infolabel(self):
'''Create an information label about the display'''
self.infolabel = QtWidgets.QLabel("Radar: \n"
"Field: \n"
"Tilt: ", self)
self.infolabel.setStyleSheet('color: red; font: italic 10px')
self.infolabel.setToolTip("Filename not loaded")
def _update_infolabel(self):
if self.Vradar.value is None:
return
self.infolabel.setText("Radar: %s\n"
"Field: %s\n"
"Tilt: %d" % (
self.Vradar.value.metadata[
'instrument_name'],
self.Vfield.value,
self.Vtilt.value+1))
if hasattr(self.Vradar.value, 'filename'):
self.infolabel.setToolTip(self.Vradar.value.filename)
########################
    # Selection methods   #
########################
def NewRadar(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vradar <artview.core.core.Variable>`.
This will:
* Update fields and tilts lists and MenuBoxes
* Check radar scan type and reset limits if needed
* Reset units and title
* If strong update: update plot
'''
# test for None
if self.Vradar.value is None:
self.fieldBox.clear()
self.tiltBox.clear()
return
# Get the tilt angles
self.rTilts = self.Vradar.value.sweep_number['data'][:]
# Get field names
self.fieldnames = self.Vradar.value.fields.keys()
# Check the file type and initialize limts
self._check_file_type()
# Update field and tilt MenuBox
self._fillTiltBox()
self._fillFieldBox()
self.units = self._get_default_units()
self.title = self._get_default_title()
if strong:
self._update_display()
self._update_infolabel()
self.VpathInteriorFunc.update(True)
def NewField(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vfield <artview.core.core.Variable>`.
This will:
* Reset colormap
* Reset units
* Update fields MenuBox
* If strong update: update plot
'''
if self.Vcolormap.value['lock'] is False:
self._set_default_cmap(strong=False)
self.units = self._get_default_units()
self.title = self._get_default_title()
idx = self.fieldBox.findText(self.Vfield.value)
self.fieldBox.setCurrentIndex(idx)
if strong:
self._update_plot()
self._update_infolabel()
self.VpathInteriorFunc.update(True)
def NewLims(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vlimits <artview.core.core.Variable>`.
This will:
* If strong update: update axes
'''
if strong:
self._update_axes()
def NewCmap(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vcolormap <artview.core.core.Variable>`.
This will:
* If strong update: update plot
'''
if strong:
self._update_plot()
def NewGatefilter(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vgatefilter <artview.core.core.Variable>`.
This will:
* If strong update: update plot
'''
if strong:
self._update_plot()
def NewTilt(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vtilt <artview.core.core.Variable>`.
This will:
* Update tilt MenuBox
* If strong update: update plot
'''
# +1 since the first one is "Tilt Window"
self.tiltBox.setCurrentIndex(self.Vtilt.value+1)
if strong:
self.title = self._get_default_title()
self._update_plot()
self._update_infolabel()
self.VpathInteriorFunc.update(True)
def NewDisplay(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`VpyartDisplay <artview.core.core.Variable>`.
This will:
* If strong update: update plot (redraws)
* else redraw canvas
'''
if strong:
self._update_plot()
else:
self.canvas.draw()
def TiltSelectCmd(self, ntilt):
'''
Captures tilt selection and update tilt
:py:class:`~artview.core.core.Variable`.
'''
if ntilt < 0:
ntilt = len(self.rTilts)-1
elif ntilt >= len(self.rTilts):
ntilt = 0
self.Vtilt.change(ntilt)
def FieldSelectCmd(self, name):
'''
Captures field selection and update field
:py:class:`~artview.core.core.Variable`.
'''
self.Vfield.change(name)
def RngRingSelectCmd(self, ringSel):
'''
Captures Range Ring selection and
redraws the field with range rings.
'''
        if ringSel == "None":
self.RngRing = False
else:
self.RngRing = True
            # Find the unambiguous range of the radar
            try:
                unrng = int(self.Vradar.value.instrument_parameters[
                    'unambiguous_range']['data'][0]/1000)
            except Exception:
                unrng = int(self.Vlimits.value['xmax'])
            # Set the ring spacing from the selection string, e.g. '10 km' -> 10
            ringdel = int(ringSel.split()[0])
# Calculate an array of range rings
self.RNG_RINGS = range(ringdel, unrng, ringdel)
self._update_plot()
def cmapSelectCmd(self, cm_name):
'''Captures colormap selection and redraws.'''
CMAP = cm_name
self.Vcolormap.value['cmap'] = cm_name
self.Vcolormap.update()
def toolZoomPanCmd(self):
'''Creates and connects to a Zoom/Pan instance.'''
from .toolbox import ZoomPan
scale = 1.1
self.tools['zoompan'] = ZoomPan(
self.Vlimits, self.ax,
base_scale=scale, parent=self.parent)
self.tools['zoompan'].connect()
def toolValueClickCmd(self):
'''Creates and connects to Point-and-click value retrieval'''
from .toolbox import ValueClick
self.tools['valueclick'] = ValueClick(
self, name=self.name + "ValueClick", parent=self.parent)
self.tools['valueclick'].connect()
def toolResetCmd(self):
'''Reset tools via disconnect.'''
from . import toolbox
self.tools = toolbox.reset_tools(self.tools)
def toolDefaultCmd(self):
'''Restore the Display defaults.'''
for key in self.tools.keys():
if self.tools[key] is not None:
self.tools[key].disconnect()
self.tools[key] = None
if self.Vcolormap.value['lock'] is False:
self._set_default_cmap()
self._set_default_limits()
def getPathInteriorValues(self, paths):
'''
        Return the values of all radar bins that fall inside the given paths.
Parameters
----------
paths : list of :py:class:`matplotlib.path.Path` instances
Returns
-------
        points : :py:class:`artview.core.points.Points`
Points object containing all bins of the current radar
and tilt inside path. Axes : 'x_disp', 'y_disp', 'ray_index',
'range_index', 'azimuth', 'range'. Fields: just current field
Notes
-----
If Vradar.value is None, returns None
'''
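        # Illustrative use (assuming Points exposes the fields/axes dicts it is
        # built from below):
        #     points = display.getPathInteriorValues(path)
        #     values = points.fields[display.getField()]['data']
        #     azimuths = points.axes['azimuth']['data']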
from .toolbox import interior_radar
radar = self.Vradar.value
tilt = self.Vtilt.value
if radar is None or not self.VpyartDisplay.value:
return None
        try:
            iter(paths)
        except TypeError:
            paths = [paths]
xy = np.empty((0, 2))
        idx = np.empty((0, 2), dtype=int)
for path in paths:
try:
x, y, z = self.VpyartDisplay.value._get_x_y_z(
tilt, False, True)
except:
x, y, z = self.VpyartDisplay.value._get_x_y_z(
self.Vfield.value, tilt, False, True)
if self.plot_type == "radarAirborne":
_xy = np.empty(shape=(x.size, 2))
_xy[:, 0] = x.flatten()
_xy[:, 1] = z.flatten()
ind = np.nonzero([path.contains_point(p) for p in _xy])[0]
_xy = _xy[ind]
ngates = radar.range['data'].size
                rayIndex = (radar.sweep_start_ray_index['data'][tilt] +
                            ind // ngates)
                gateIndex = ind % ngates
                _idx = np.concatenate((rayIndex[np.newaxis],
                                       gateIndex[np.newaxis]), axis=0)
                _idx = _idx.transpose().astype(int)
elif self.plot_type == "radarPpi":
_xy = np.empty(shape=(x.size, 2))
_xy[:, 0] = x.flatten()
_xy[:, 1] = y.flatten()
ind = np.nonzero([path.contains_point(p) for p in _xy])[0]
_xy = _xy[ind]
ngates = radar.range['data'].size
                rayIndex = (radar.sweep_start_ray_index['data'][tilt] +
                            ind // ngates)
                gateIndex = ind % ngates
                _idx = np.concatenate((rayIndex[np.newaxis],
                                       gateIndex[np.newaxis]), axis=0)
                _idx = _idx.transpose().astype(int)
elif self.plot_type == "radarRhi":
_xy = np.empty(shape=(x.size, 2))
r = np.sqrt(x ** 2 + y ** 2) * np.sign(y)
if np.all(r < 1.):
r = -r
_xy[:, 0] = r.flatten()
_xy[:, 1] = z.flatten()
ind = np.nonzero([path.contains_point(p) for p in _xy])[0]
_xy = _xy[ind]
ngates = radar.range['data'].size
                rayIndex = (radar.sweep_start_ray_index['data'][tilt] +
                            ind // ngates)
                gateIndex = ind % ngates
                _idx = np.concatenate((rayIndex[np.newaxis],
                                       gateIndex[np.newaxis]), axis=0)
                _idx = _idx.transpose().astype(int)
xy = np.concatenate((xy, _xy))
idx = np.concatenate((idx, _idx))
xaxis = {'data': xy[:, 0] * 1000.,
'long_name': 'X-coordinate in Cartesian system',
'axis': 'X',
'units': 'm'}
yaxis = {'data': xy[:, 1] * 1000.,
'long_name': 'Y-coordinate in Cartesian system',
'axis': 'Y',
'units': 'm'}
azi = radar.azimuth.copy()
azi['data'] = radar.azimuth['data'][idx[:, 0]]
rng = radar.range.copy()
rng['data'] = radar.range['data'][idx[:, 1]]
field = radar.fields[self.Vfield.value].copy()
field['data'] = radar.fields[self.Vfield.value]['data'][
idx[:, 0], idx[:, 1]]
ray_idx = {'data': idx[:, 0],
'long_name': 'index in ray dimension'}
rng_idx = {'data': idx[:, 1],
'long_name': 'index in range dimension'}
axes = {'x_disp': xaxis,
'y_disp': yaxis,
'ray_index': ray_idx,
'range_index': rng_idx,
'azimuth': azi,
'range': rng}
fields = {self.Vfield.value: field}
points = Points(fields, axes, radar.metadata.copy(), xy.shape[0])
return points
####################
# Plotting methods #
####################
def _set_fig_ax(self):
'''Set the figure and axis to plot.'''
self.XSIZE = 8
self.YSIZE = 8
self.fig = Figure(figsize=(self.XSIZE, self.YSIZE))
self.ax = self.fig.add_axes([0.2, 0.2, 0.7, 0.7])
self.cax = self.fig.add_axes([0.2, 0.10, 0.7, 0.02])
self.VplotAxes.change(self.ax)
# self._update_axes()
def _update_fig_ax(self):
'''Set the figure and axis to plot.'''
if self.plot_type in ("radarAirborne", "radarRhi"):
self.YSIZE = 5
else:
self.YSIZE = 8
xwidth = 0.7
yheight = 0.7 # * float(self.YSIZE) / float(self.XSIZE)
self.ax.set_position([0.2, 0.55-0.5*yheight, xwidth, yheight])
self.cax.set_position([0.2, 0.10, xwidth, 0.02])
self._update_axes()
def _change_axes_position(self):
'''GUI change axes Position.'''
options_type = [
("Plot area top", float),
("Plot area left", float),
("Plot area bottom", float),
("Plot area right", float),
("Colormap top", float),
("Colormap left", float),
("Colormap bottom", float),
("Colormap right", float),
]
ax_pos = self.ax.get_position()
cax_pos = self.cax.get_position()
value = {
"Plot area bottom": ax_pos.y0,
"Plot area left": ax_pos.x0,
"Plot area top": ax_pos.y0+ax_pos.height,
"Plot area right": ax_pos.x0+ax_pos.width,
"Colormap bottom": cax_pos.y0,
"Colormap left": cax_pos.x0,
"Colormap top": cax_pos.y0+cax_pos.height,
"Colormap right": cax_pos.x0+cax_pos.width,
}
parm = common.get_options(options_type, value)
self.ax.set_position([parm["Plot area left"],
parm["Plot area bottom"],
parm["Plot area right"] -
parm["Plot area left"],
parm["Plot area top"] -
parm["Plot area bottom"],
])
self.cax.set_position([parm["Colormap left"],
parm["Colormap bottom"],
parm["Colormap right"] -
parm["Colormap left"],
parm["Colormap top"] -
parm["Colormap bottom"],
])
self._update_axes()
def _set_figure_canvas(self):
'''Set the figure canvas to draw in window area.'''
        self.canvas = FigureCanvas(self.fig)
# Add the widget to the canvas
self.layout.addWidget(self.canvas, 1, 0, 7, 6)
def _update_display(self):
if self.plot_type == "radarAirborne":
from pkg_resources import parse_version
            if parse_version(pyart.__version__) >= parse_version('1.6.0'):
                display = pyart.graph.AirborneRadarDisplay(
                    self.Vradar.value)
            else:
                # fallback for older Py-ART releases, where the airborne
                # display class was named RadarDisplay_Airborne
                display = pyart.graph.RadarDisplay_Airborne(
                    self.Vradar.value)
elif self.plot_type == "radarPpiMap":
display = pyart.graph.RadarMapDisplay(self.Vradar.value)
elif self.plot_type == "radarPpi":
display = pyart.graph.RadarDisplay(self.Vradar.value)
elif self.plot_type == "radarRhi":
display = pyart.graph.RadarDisplay(self.Vradar.value)
self.VpyartDisplay.change(display)
def _update_plot(self):
'''Draw/Redraw the plot.'''
if self.Vradar.value is None:
return
# Create the plot with PyArt RadarDisplay
self.ax.cla() # Clear the plot axes
self.cax.cla() # Clear the colorbar axes
self.VplotAxes.update()
if self.Vfield.value not in self.Vradar.value.fields.keys():
self.canvas.draw()
self.statusbar.setStyleSheet("QStatusBar{padding-left:8px;" +
"background:rgba(255,0,0,255);" +
"color:black;font-weight:bold;}")
self.statusbar.showMessage("Field not Found in Radar", msecs=5000)
return
else:
self.statusbar.setStyleSheet("QStatusBar{padding-left:8px;" +
"background:rgba(0,0,0,0);" +
"color:black;font-weight:bold;}")
self.statusbar.clearMessage()
title = self.title
limits = self.Vlimits.value
cmap = self.Vcolormap.value
display = self.VpyartDisplay.value
if self.gatefilterToggle.isChecked():
gatefilter = self.Vgatefilter.value
else:
gatefilter = None
if self.ignoreEdgesToggle.isChecked():
ignoreEdges = False
else:
ignoreEdges = True
if 'norm' in cmap:
norm = cmap['norm']
mask_outside = False
else:
norm = None
mask_outside = True
if self.plot_type == "radarAirborne":
self.plot = display.plot_sweep_grid(
self.Vfield.value, vmin=cmap['vmin'],
vmax=cmap['vmax'], colorbar_flag=False, cmap=cmap['cmap'],
norm=norm, mask_outside=mask_outside,
edges=ignoreEdges, gatefilter=gatefilter,
ax=self.ax, fig=self.fig, title=title)
display.plot_grid_lines()
elif self.plot_type == "radarPpi" or self.plot_type == "radarPpiMap":
# Create Plot
if self.useMapToggle.isChecked():
plot_ppi = display.plot_ppi_map
else:
plot_ppi = display.plot_ppi
self.plot = plot_ppi(
self.Vfield.value, self.Vtilt.value,
vmin=cmap['vmin'], vmax=cmap['vmax'], norm=norm,
colorbar_flag=False, cmap=cmap['cmap'], mask_outside=mask_outside,
edges=ignoreEdges, gatefilter=gatefilter,
ax=self.ax, fig=self.fig, title=title)
# Add range rings
if self.RngRing:
display.plot_range_rings(self.RNG_RINGS, ax=self.ax)
# Add radar location
display.plot_cross_hair(5., ax=self.ax)
elif self.plot_type == "radarRhi":
# Create Plot
self.plot = display.plot_rhi(
self.Vfield.value, self.Vtilt.value,
vmin=cmap['vmin'], vmax=cmap['vmax'], norm=norm,
colorbar_flag=False, cmap=cmap['cmap'], mask_outside=mask_outside,
edges=ignoreEdges, gatefilter=gatefilter,
ax=self.ax, fig=self.fig, title=title)
# Add range rings
if self.RngRing:
display.plot_range_rings(self.RNG_RINGS, ax=self.ax)
self._update_axes()
if norm is None:
norm = mlabNormalize(vmin=cmap['vmin'],
vmax=cmap['vmax'])
if self.colormapToggle.isChecked():
self.cbar = mlabColorbarBase(self.cax, cmap=cmap['cmap'],
norm=norm, orientation='horizontal')
self.cbar.set_label(self.units)
self.cax.set_visible(True)
else:
self.cax.set_visible(False)
# print "Plotting %s field, Tilt %d in %s" % (
# self.Vfield.value, self.Vtilt.value+1, self.name)
self.canvas.draw()
def _update_axes(self):
'''Change the Plot Axes.'''
limits = self.Vlimits.value
self.ax.set_xlim(limits['xmin'], limits['xmax'])
self.ax.set_ylim(limits['ymin'], limits['ymax'])
self.ax.figure.canvas.draw()
#########################
# Check methods #
#########################
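    # Summary of the mapping below: non-RHI scans become "radarPpi" (or
    # "radarPpiMap" when the map toggle is on); RHI scans become
    # "radarAirborne" when the radar metadata reports an aircraft platform
    # and "radarRhi" otherwise.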
def _check_file_type(self):
'''Check file to see if the file is airborne or rhi.'''
radar = self.Vradar.value
old_plot_type = self.plot_type
if radar.scan_type != 'rhi':
if self.useMapToggle.isChecked():
self.plot_type = "radarPpiMap"
else:
self.plot_type = "radarPpi"
else:
if 'platform_type' in radar.metadata:
if ('aircraft' in radar.metadata['platform_type'] or
'aircraft_tail' in radar.metadata['platform_type']):
self.plot_type = "radarAirborne"
else:
self.plot_type = "radarRhi"
else:
self.plot_type = "radarRhi"
if self.plot_type != old_plot_type:
print("Changed Scan types, reinitializing", file=log.debug)
self.toolResetCmd()
self._set_default_limits()
self._update_fig_ax()
def _set_default_limits(self, strong=True):
''' Set limits to pre-defined default.'''
from .limits import _default_limits
limits, cmap = _default_limits(
self.Vfield.value, self.plot_type)
self.Vlimits.change(limits, strong)
def _set_default_cmap(self, strong=True):
''' Set colormap to pre-defined default.'''
cmap = pyart.config.get_field_colormap(self.Vfield.value)
d = {}
d['cmap'] = cmap
d['lock'] = False
lims = pyart.config.get_field_limits(self.Vfield.value,
self.Vradar.value,
self.Vtilt.value)
if lims != (None, None):
d['vmin'] = lims[0]
d['vmax'] = lims[1]
else:
d['vmin'] = -10
d['vmax'] = 65
        # HACK while pyart doesn't implement this itself
if (self.Vradar.value is not None and
self.Vfield.value in self.Vradar.value.fields):
if 'valid_min' in self.Vradar.value.fields[self.Vfield.value]:
d['vmin'] = self.Vradar.value.fields[self.Vfield.value][
'valid_min']
if 'valid_max' in self.Vradar.value.fields[self.Vfield.value]:
d['vmax'] = self.Vradar.value.fields[self.Vfield.value][
'valid_max']
self.Vcolormap.change(d, strong)
def _get_default_title(self):
'''Get default title from pyart.'''
if (self.Vradar.value is None or
self.Vfield.value not in self.Vradar.value.fields):
return ''
return pyart.graph.common.generate_title(self.Vradar.value,
self.Vfield.value,
self.Vtilt.value)
def _get_default_units(self):
'''Get default units for current radar and field.'''
if self.Vradar.value is not None:
try:
return self.Vradar.value.fields[self.Vfield.value]['units']
except:
return ''
else:
return ''
########################
# Image save methods #
########################
def _quick_savefile(self, PTYPE=IMAGE_EXT):
'''Save the current display via PyArt interface.'''
imagename = self.VpyartDisplay.value.generate_filename(
self.Vfield.value, self.Vtilt.value, ext=IMAGE_EXT)
self.canvas.print_figure(os.path.join(os.getcwd(), imagename), dpi=DPI)
self.statusbar.showMessage(
'Saved to %s' % os.path.join(os.getcwd(), imagename))
def _savefile(self, PTYPE=IMAGE_EXT):
'''Save the current display using PyQt dialog interface.'''
PBNAME = self.VpyartDisplay.value.generate_filename(
self.Vfield.value, self.Vtilt.value, ext=IMAGE_EXT)
file_choices = "PNG (*.png)|*.png"
        fname = QtWidgets.QFileDialog.getSaveFileName(
            self, 'Save file', PBNAME, file_choices)
        if isinstance(fname, tuple):  # Qt5-style bindings return (name, filter)
            fname = fname[0]
        path = str(fname)
if path:
self.canvas.print_figure(path, dpi=DPI)
self.statusbar.showMessage('Saved to %s' % path)
def minimumSizeHint(self):
return QtCore.QSize(20, 20)
########################
# get methods #
########################
def getPlotAxis(self):
'''Get :py:class:`matplotlib.axes.Axes` instance of main plot.'''
return self.ax
def getStatusBar(self):
        '''Get :py:class:`QtWidgets.QStatusBar` instance.'''
return self.statusbar
def getField(self):
'''Get current field.'''
return self.Vfield.value
def getUnits(self):
'''Get current units.'''
return self.units
def getRadar(self):
''' get current radar '''
return self.Vradar.value
def getTilt(self):
''' get current tilt '''
return self.Vtilt.value
class _DisplayStart(QtWidgets.QDialog):
'''
Dialog Class for graphical start of display, to be used in guiStart.
'''
def __init__(self):
'''Initialize the class to create the interface.'''
super(_DisplayStart, self).__init__()
self.result = {}
self.layout = QtWidgets.QGridLayout(self)
# set window as modal
self.setWindowModality(QtCore.Qt.ApplicationModal)
self.setupUi()
def chooseRadar(self):
item = VariableChoose().chooseVariable()
if item is None:
return
else:
self.result["Vradar"] = getattr(item[1], item[2])
def chooseField(self):
item = VariableChoose().chooseVariable()
if item is None:
return
else:
self.result["Vfield"] = getattr(item[1], item[2])
def chooseTilt(self):
item = VariableChoose().chooseVariable()
if item is None:
return
else:
self.result["Vtilt"] = getattr(item[1], item[2])
def chooseLims(self):
item = VariableChoose().chooseVariable()
if item is None:
return
else:
self.result["Vlimits"] = getattr(item[1], item[2])
def setupUi(self):
self.radarButton = QtWidgets.QPushButton("Find Variable")
self.radarButton.clicked.connect(self.chooseRadar)
self.layout.addWidget(QtWidgets.QLabel("VRadar"), 0, 0)
self.layout.addWidget(self.radarButton, 0, 1, 1, 3)
self.fieldButton = QtWidgets.QPushButton("Find Variable")
self.fieldButton.clicked.connect(self.chooseField)
self.layout.addWidget(QtWidgets.QLabel("Vfield"), 1, 0)
self.field = QtWidgets.QLineEdit("")
self.layout.addWidget(self.field, 1, 1)
self.layout.addWidget(QtWidgets.QLabel("or"), 1, 2)
self.layout.addWidget(self.fieldButton, 1, 3)
self.tiltButton = QtWidgets.QPushButton("Find Variable")
self.tiltButton.clicked.connect(self.chooseTilt)
self.layout.addWidget(QtWidgets.QLabel("Vtilt"), 2, 0)
self.tilt = QtWidgets.QSpinBox()
self.layout.addWidget(self.tilt, 2, 1)
self.layout.addWidget(QtWidgets.QLabel("or"), 2, 2)
self.layout.addWidget(self.tiltButton, 2, 3)
self.limsButton = QtWidgets.QPushButton("Find Variable")
self.limsButton.clicked.connect(self.chooseLims)
self.layout.addWidget(QtWidgets.QLabel("Vlimits"), 3, 0)
self.layout.addWidget(self.limsButton, 3, 1, 1, 3)
self.name = QtWidgets.QLineEdit("RadarDisplay")
self.layout.addWidget(QtWidgets.QLabel("name"), 4, 0)
self.layout.addWidget(self.name, 4, 1, 1, 3)
self.closeButton = QtWidgets.QPushButton("Start")
self.closeButton.clicked.connect(self.closeDialog)
self.layout.addWidget(self.closeButton, 5, 0, 1, 5)
def closeDialog(self):
self.done(QtWidgets.QDialog.Accepted)
def startDisplay(self):
self.exec_()
        # if no Vradar was chosen, fall back to an empty Variable
        if 'Vradar' not in self.result:
            self.result['Vradar'] = Variable(None)
            # common.ShowWarning("Must select a variable for Vradar")
            # allowing this to continue, but it will result in an error later
        # if Vfield and Vtilt were not selected, create new Variables
        # from the text and spin widgets
field = str(self.field.text())
tilt = self.tilt.value()
if 'Vfield' not in self.result:
self.result['Vfield'] = Variable(field)
if 'Vtilt' not in self.result:
self.result['Vtilt'] = Variable(tilt)
self.result['name'] = str(self.name.text())
return self.result
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.popularity'
db.add_column(u'projects_project', 'popularity',
self.gf('django.db.models.fields.FloatField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.popularity'
db.delete_column(u'projects_project', 'popularity')
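    # For reference, the added column corresponds roughly to a model field like
    #     popularity = models.FloatField(null=True)
    # on the Project model (sketch only; the actual model lives in the app code).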
models = {
u'accounts.bluebottleuser': {
'Meta': {'object_name': 'BlueBottleUser'},
'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}),
'availability': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'available_time': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'picture': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'share_money': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'share_time_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'user_type': ('django.db.models.fields.CharField', [], {'default': "'person'", 'max_length': '25'}),
'username': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'geo.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'alpha2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'alpha3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'oda_recipient': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.SubRegion']"})
},
u'geo.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'})
},
u'geo.subregion': {
'Meta': {'ordering': "['name']", 'object_name': 'SubRegion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Region']"})
},
u'organizations.organization': {
'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
'account_bank_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}),
'account_bank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11', 'blank': 'True'}),
'account_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_iban': ('django_iban.fields.IBANField', [], {'max_length': '34', 'blank': 'True'}),
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legal_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'partner_organizations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'registration': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'projects.partnerorganization': {
'Meta': {'object_name': 'PartnerOrganization'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'projects.project': {
'Meta': {'ordering': "['title']", 'object_name': 'Project'},
'coach': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team_member'", 'null': 'True', 'to': u"orm['accounts.BlueBottleUser']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['accounts.BlueBottleUser']"}),
'partner_organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.PartnerOrganization']", 'null': 'True', 'blank': 'True'}),
'phase': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'popularity': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'projects.projectambassador': {
'Meta': {'object_name': 'ProjectAmbassador'},
'description': ('django.db.models.fields.TextField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project_plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectPlan']"})
},
u'projects.projectbudgetline': {
'Meta': {'object_name': 'ProjectBudgetLine'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '10'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectPlan']"})
},
u'projects.projectcampaign': {
'Meta': {'object_name': 'ProjectCampaign'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': "'10'"}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'money_asked': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'money_donated': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['projects.Project']", 'unique': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'projects.projectpitch': {
'Meta': {'object_name': 'ProjectPitch'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'need': ('django.db.models.fields.CharField', [], {'default': "'both'", 'max_length': '20', 'null': 'True'}),
'pitch': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['projects.Project']", 'unique': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectTheme']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
},
u'projects.projectplan': {
'Meta': {'object_name': 'ProjectPlan'},
'campaign': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'effects': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'for_who': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'future': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'money_needed': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'need': ('django.db.models.fields.CharField', [], {'default': "'both'", 'max_length': '20', 'null': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organizations.Organization']", 'null': 'True', 'blank': 'True'}),
'pitch': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['projects.Project']", 'unique': 'True'}),
'reach': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectTheme']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'projects.projectresult': {
'Meta': {'object_name': 'ProjectResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['projects.Project']", 'unique': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['projects']
|
|
import datetime
import math
import traceback
try:
import subprocess32 as subprocess
except Exception:
import subprocess
from pandaharvester.harvestercore.work_spec import WorkSpec
from .base_worker_maker import BaseWorkerMaker
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestermisc.info_utils import PandaQueuesDict
# simple backfill eventservice maker
# logger
_logger = core_utils.setup_logger('simple_bf_worker_maker')
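# Overall flow (summary): num_ready_resources() queries the batch system's
# backfill windows with `showbf`, adjust_resources() trims them according to
# the configured adjusters, and make_worker() then sizes a WorkSpec from the
# queue configuration and the number of remaining events in the jobs.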
class SimpleBackfillESWorkerMaker(BaseWorkerMaker):
# constructor
def __init__(self, **kwarg):
self.jobAttributesToUse = ['nCore', 'minRamCount', 'maxDiskCount', 'maxWalltime']
self.adjusters = None
BaseWorkerMaker.__init__(self, **kwarg)
self.init_adjusters_defaults()
self.dyn_resources = None
# make a worker from jobs
def make_worker(self, jobspec_list, queue_config, resource_type):
tmpLog = self.make_logger(_logger, 'queue={0}'.format(queue_config.queueName),
method_name='make_worker')
tmpLog.debug('jobspec_list: {0}'.format(jobspec_list))
workSpec = WorkSpec()
workSpec.creationTime = datetime.datetime.utcnow()
# get the queue configuration from the DB
panda_queues_dict = PandaQueuesDict()
queue_dict = panda_queues_dict.get(queue_config.queueName, {})
workSpec.minRamCount = queue_dict.get('maxrss', 1) or 1
workSpec.maxWalltime = queue_dict.get('maxtime', 1)
workSpec.maxDiskCount = queue_dict.get('maxwdir', 1)
# get info from jobs
if len(jobspec_list) > 0:
nRemainingEvents = 0
for jobspec in jobspec_list:
if jobspec.nRemainingEvents:
nRemainingEvents += jobspec.nRemainingEvents
nCore, maxWalltime = self.calculate_worker_requirements(nRemainingEvents)
workSpec.nCore = nCore
workSpec.maxWalltime = maxWalltime
# TODO: this needs to be improved with real resource types
if resource_type and resource_type != 'ANY':
workSpec.resourceType = resource_type
elif workSpec.nCore == 1:
workSpec.resourceType = 'SCORE'
else:
workSpec.resourceType = 'MCORE'
return workSpec
# get number of workers per job
def get_num_workers_per_job(self, n_workers):
try:
# return min(self.nWorkersPerJob, n_workers)
return self.nWorkersPerJob
except Exception:
return 1
# check number of ready resources
def num_ready_resources(self):
# make logger
tmpLog = self.make_logger(_logger, 'simple_bf_es_maker',
method_name='num_ready_resources')
try:
resources = self.get_bf_resources()
if resources:
resources = self.adjust_resources(resources)
if resources:
self.dyn_resources = resources
return len(self.dyn_resources)
return 0
except Exception:
tmpLog.error("Failed to get num of ready resources: %s" % (traceback.format_exc()))
return 0
def init_adjusters_defaults(self):
"""
adjusters: [{"minNodes": <minNodes>,
"maxNodes": <maxNodes>,
"minWalltimeSeconds": <minWalltimeSeconds>,
"maxWalltimeSeconds": <maxWalltimeSeconds>,
"nodesToDecrease": <nodesToDecrease>,
"walltimeSecondsToDecrease": <walltimeSecondsToDecrease>,
"minCapacity": <minWalltimeSeconds> * <minNodes>,
"maxCapacity": <maxWalltimeSeconds> * <maxNodes>}]
"""
adj_defaults = {"minNodes": 1,
"maxNodes": 125,
"minWalltimeSeconds": 1800,
"maxWalltimeSeconds": 7200,
"nodesToDecrease": 1,
"walltimeSecondsToDecrease": 60}
if self.adjusters:
for adjuster in self.adjusters:
for key, value in adj_defaults.items():
if key not in adjuster:
adjuster[key] = value
adjuster['minCapacity'] = adjuster['minWalltimeSeconds'] * adjuster['minNodes']
adjuster['maxCapacity'] = adjuster['maxWalltimeSeconds'] * adjuster['maxNodes']
self.adjusters.sort(key=lambda my_dict: my_dict['minNodes'])
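    # Example (illustrative): with the defaults above, a configured adjuster of
    # {"minNodes": 1, "maxNodes": 8} is filled out to
    # {"minNodes": 1, "maxNodes": 8, "minWalltimeSeconds": 1800,
    #  "maxWalltimeSeconds": 7200, "nodesToDecrease": 1,
    #  "walltimeSecondsToDecrease": 60, "minCapacity": 1800,
    #  "maxCapacity": 57600}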
# get backfill resources
def get_bf_resources(self, blocking=True):
# make logger
tmpLog = self.make_logger(_logger, 'simple_bf_es_maker',
method_name='get_bf_resources')
resources = []
# command
if blocking:
comStr = "showbf -p {0} --blocking".format(self.partition)
else:
comStr = "showbf -p {0}".format(self.partition)
# get backfill resources
tmpLog.debug('Get backfill resources with {0}'.format(comStr))
p = subprocess.Popen(comStr.split(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# check return code
stdOut, stdErr = p.communicate()
retCode = p.returncode
tmpLog.debug('retCode={0}'.format(retCode))
if retCode == 0:
# parse the available backfill resources
tmpLog.debug("Available backfill resources for partition(%s):\n%s" % (self.partition, stdOut))
lines = stdOut.splitlines()
for line in lines:
line = line.strip()
if line.startswith(self.partition):
try:
items = line.split()
nodes = int(items[2])
if nodes < self.minNodes:
continue
walltime = items[3]
resources.append({'nodes': nodes, 'walltime': walltime})
except Exception:
tmpLog.error("Failed to parse line: %s" % line)
else:
# failed
errStr = stdOut + ' ' + stdErr
tmpLog.error(errStr)
tmpLog.info("Available backfill resources: %s" % resources)
return resources
def get_adjuster(self, nodes):
for adj in self.adjusters:
if nodes >= adj['minNodes'] and nodes <= adj['maxNodes']:
return adj
return None
def adjust_resources(self, resources):
# make logger
tmpLog = self.make_logger(_logger, 'simple_bf_es_maker',
method_name='adjust_resources')
ret_resources = []
for resource in resources:
if resource['nodes'] > self.maxNodes:
nodes = self.maxNodes
else:
nodes = resource['nodes']
adjuster = self.get_adjuster(nodes)
if adjuster:
if (resource['nodes'] - adjuster['nodesToDecrease']) < nodes:
nodes = resource['nodes'] - adjuster['nodesToDecrease']
if nodes <= 0:
continue
walltime = resource['walltime']
if walltime == 'INFINITY':
walltime = adjuster['maxWalltimeSeconds']
ret_resources.append({'nodes': nodes, 'walltime': walltime, 'nCore': nodes * self.nCorePerNode})
else:
h, m, s = walltime.split(':')
walltime = int(h) * 3600 + int(m) * 60 + int(s)
if walltime >= adjuster['minWalltimeSeconds'] and walltime < adjuster['maxWalltimeSeconds']:
walltime -= adjuster['walltimeSecondsToDecrease']
ret_resources.append({'nodes': nodes, 'walltime': walltime, 'nCore': nodes * self.nCorePerNode})
elif walltime >= adjuster['maxWalltimeSeconds']:
walltime = adjuster['maxWalltimeSeconds'] - adjuster['walltimeSecondsToDecrease']
ret_resources.append({'nodes': nodes, 'walltime': walltime, 'nCore': nodes * self.nCorePerNode})
ret_resources.sort(key=lambda my_dict: my_dict['nodes'] * my_dict['walltime'], reverse=True)
tmpLog.info("Available backfill resources after adjusting: %s" % ret_resources)
return ret_resources
def get_dynamic_resource(self, queue_name, resource_type):
resources = self.get_bf_resources()
if resources:
resources = self.adjust_resources(resources)
if resources:
return {'nNewWorkers': 1, 'resources': resources}
return {}
def get_needed_nodes_walltime(self, availNodes, availWalltime, neededCapacity):
tmpLog = self.make_logger(_logger, 'simple_bf_es_maker',
method_name='get_needed_nodes_walltime')
solutions = []
spareNodes = 1 # one Yoda node which doesn't process any events
for adj in self.adjusters:
if availNodes < adj['minNodes']:
continue
solutionNodes = min(availNodes, adj['maxNodes'])
solutionWalltime = min(availWalltime, adj['maxWalltimeSeconds'] - adj['walltimeSecondsToDecrease'])
if neededCapacity >= (solutionNodes - spareNodes) * solutionWalltime:
solutions.append({'solutionNodes': solutionNodes, 'solutionWalltime': solutionWalltime})
else:
solutionNodes = neededCapacity / solutionWalltime + spareNodes
if solutionNodes >= adj['minNodes']:
solutions.append({'solutionNodes': solutionNodes, 'solutionWalltime': solutionWalltime})
else:
solutionNodes = adj['minNodes']
requiredWalltime = neededCapacity / (solutionNodes - spareNodes)
if requiredWalltime >= adj['minWalltimeSeconds']:
# walltime can be bigger than the requiredWalltime, will exit automatically
solutions.append({'solutionNodes': solutionNodes, 'solutionWalltime': solutionWalltime})
# sort solutions by walltime first, then by node count, largest first
solutions.sort(key=lambda s: (s['solutionWalltime'], s['solutionNodes']), reverse=True)
tmpLog.info("Available solutions: %s" % solutions)
if solutions:
return solutions[0]['solutionNodes'], solutions[0]['solutionWalltime']
else:
return None, None
# calculate needed cores and maxwalltime
def calculate_worker_requirements(self, nRemainingEvents):
tmpLog = self.make_logger(_logger, 'simple_bf_es_maker',
method_name='calculate_worker_requirements')
if not hasattr(self, 'nSecondsPerEvent') or self.nSecondsPerEvent < 100:
tmpLog.warn("nSecondsPerEvent is not set, will use default value 480 seconds(8 minutes)")
nSecondsPerEvent = 480
else:
nSecondsPerEvent = self.nSecondsPerEvent
nCore = None
walltime = None
if self.dyn_resources:
resource = self.dyn_resources.pop(0)
tmpLog.debug("Selected dynamic resources: %s" % resource)
walltime = resource['walltime']
if nRemainingEvents <= 0:
if resource['nodes'] < self.defaultNodes:
nCore = resource['nodes'] * self.nCorePerNode
else:
tmpLog.warn("nRemainingEvents is not correctly propagated or delayed, will not submit big jobs, shrink number of nodes to default")
nCore = self.defaultNodes * self.nCorePerNode
else:
neededCapacity = nRemainingEvents * nSecondsPerEvent * 1.0 / self.nCorePerNode
tmpLog.info("nRemainingEvents: %s, nSecondsPerEvent: %s, nCorePerNode: %s, neededCapacity(nodes*walltime): %s" % (nRemainingEvents,
nSecondsPerEvent,
self.nCorePerNode,
neededCapacity))
neededNodes, neededWalltime = self.get_needed_nodes_walltime(resource['nodes'], walltime, neededCapacity)
tmpLog.info("neededNodes: %s, neededWalltime: %s" % (neededNodes, neededWalltime))
neededNodes = int(math.ceil(neededNodes))
walltime = int(neededWalltime)
if neededNodes < 2:
neededNodes = 2
nCore = neededNodes * self.nCorePerNode
else:
nCore = self.defaultNodes * self.nCorePerNode
walltime = self.defaultWalltimeSeconds
return nCore, walltime
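# --- illustrative sketch, not part of Harvester --------------------------------
# The sizing arithmetic used by calculate_worker_requirements above, reduced to a
# standalone helper. neededCapacity is measured in node*seconds: events times
# seconds-per-event, spread over the cores of one node. The core count below is
# an example value, not a Harvester default.
def _example_needed_capacity(n_remaining_events, n_seconds_per_event=480, n_core_per_node=64):
    """Return the node*seconds needed to drain the remaining events."""
    return n_remaining_events * n_seconds_per_event * 1.0 / n_core_per_node

# e.g. 100000 events at 480 s/event on 64-core nodes need ~750000 node*seconds,
# i.e. roughly 105 worker nodes (plus one spare Yoda node) for a 7200 s walltime.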
|
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
class ContextTestCase(test.NoDBTestCase):
# NOTE(danms): Avoid any cells setup by claiming we will
# do things ourselves.
USES_DB_SELF = True
def setUp(self):
super(ContextTestCase, self).setUp()
self.useFixture(o_fixture.ClearRequestContext())
def test_request_context_elevated(self):
user_ctxt = context.RequestContext('111',
'222',
is_admin=False)
self.assertFalse(user_ctxt.is_admin)
admin_ctxt = user_ctxt.elevated()
self.assertTrue(admin_ctxt.is_admin)
self.assertIn('admin', admin_ctxt.roles)
self.assertFalse(user_ctxt.is_admin)
self.assertNotIn('admin', user_ctxt.roles)
def test_request_context_sets_is_admin(self):
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_by_role(self):
ctxt = context.RequestContext('111',
'222',
roles=['administrator'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_upcase(self):
ctxt = context.RequestContext('111',
'222',
roles=['Admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_read_deleted(self):
ctxt = context.RequestContext('111',
'222',
read_deleted='yes')
self.assertEqual('yes', ctxt.read_deleted)
ctxt.read_deleted = 'no'
self.assertEqual('no', ctxt.read_deleted)
def test_request_context_read_deleted_invalid(self):
self.assertRaises(ValueError,
context.RequestContext,
'111',
'222',
read_deleted=True)
ctxt = context.RequestContext('111', '222')
self.assertRaises(ValueError,
setattr,
ctxt,
'read_deleted',
True)
def test_service_catalog_default(self):
ctxt = context.RequestContext('111', '222')
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=[])
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=None)
self.assertEqual([], ctxt.service_catalog)
def test_service_catalog_filter(self):
service_catalog = [
{u'type': u'compute', u'name': u'nova'},
{u'type': u's3', u'name': u's3'},
{u'type': u'image', u'name': u'glance'},
{u'type': u'volumev3', u'name': u'cinderv3'},
{u'type': u'network', u'name': u'neutron'},
{u'type': u'ec2', u'name': u'ec2'},
{u'type': u'object-store', u'name': u'swift'},
{u'type': u'identity', u'name': u'keystone'},
{u'type': u'block-storage', u'name': u'cinder'},
{u'type': None, u'name': u'S_withouttype'},
{u'type': u'vo', u'name': u'S_partofvolume'}]
volume_catalog = [{u'type': u'image', u'name': u'glance'},
{u'type': u'volumev3', u'name': u'cinderv3'},
{u'type': u'network', u'name': u'neutron'},
{u'type': u'block-storage', u'name': u'cinder'}]
ctxt = context.RequestContext('111', '222',
service_catalog=service_catalog)
self.assertEqual(volume_catalog, ctxt.service_catalog)
def test_to_dict_from_dict_no_log(self):
warns = []
def stub_warn(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
warns.append(str(msg) % a)
self.stub_out('nova.context.LOG.warning', stub_warn)
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
context.RequestContext.from_dict(ctxt.to_dict())
self.assertEqual(0, len(warns), warns)
def test_store_when_no_overwrite(self):
# If no context exists we store one even if overwrite is false
# (since we are not overwriting anything).
ctx = context.RequestContext('111',
'222',
overwrite=False)
self.assertIs(o_context.get_current(), ctx)
def test_no_overwrite(self):
# If there is already a context in the cache a new one will
# not overwrite it if overwrite=False.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.RequestContext('333',
'444',
overwrite=False)
self.assertIs(o_context.get_current(), ctx1)
def test_get_context_no_overwrite(self):
# If there is already a context in the cache creating another context
# should not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_context()
self.assertIs(ctx1, o_context.get_current())
def test_admin_no_overwrite(self):
# If there is already a context in the cache creating an admin
# context will not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_admin_context()
self.assertIs(o_context.get_current(), ctx1)
def test_convert_from_rc_to_dict(self):
ctx = context.RequestContext(
111, 222, request_id='req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
timestamp='2015-03-02T22:31:56.641629')
values2 = ctx.to_dict()
expected_values = {'auth_token': None,
'domain': None,
'is_admin': False,
'is_admin_project': True,
'project_id': 222,
'project_domain': None,
'project_name': None,
'quota_class': None,
'read_deleted': 'no',
'read_only': False,
'remote_address': None,
'request_id':
'req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
'resource_uuid': None,
'roles': [],
'service_catalog': [],
'show_deleted': False,
'tenant': 222,
'timestamp': '2015-03-02T22:31:56.641629',
'user': 111,
'user_domain': None,
'user_id': 111,
'user_identity': '111 222 - - -',
'user_name': None}
for k, v in expected_values.items():
self.assertIn(k, values2)
self.assertEqual(values2[k], v)
@mock.patch.object(context.policy, 'authorize')
def test_can(self, mock_authorize):
mock_authorize.return_value = True
ctxt = context.RequestContext('111', '222')
result = ctxt.can(mock.sentinel.rule)
self.assertTrue(result)
mock_authorize.assert_called_once_with(
ctxt, mock.sentinel.rule, None)
@mock.patch.object(context.policy, 'authorize')
def test_can_fatal(self, mock_authorize):
mock_authorize.side_effect = exception.Forbidden
ctxt = context.RequestContext('111', '222')
self.assertRaises(exception.Forbidden,
ctxt.can, mock.sentinel.rule)
@mock.patch.object(context.policy, 'authorize')
def test_can_non_fatal(self, mock_authorize):
mock_authorize.side_effect = exception.Forbidden
ctxt = context.RequestContext('111', '222')
result = ctxt.can(mock.sentinel.rule, mock.sentinel.target,
fatal=False)
self.assertFalse(result)
mock_authorize.assert_called_once_with(ctxt, mock.sentinel.rule,
mock.sentinel.target)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell(self, mock_create_ctxt_mgr, mock_rpc):
mock_create_ctxt_mgr.return_value = mock.sentinel.cdb
mock_rpc.return_value = mock.sentinel.cmq
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
# Verify the existing db_connection, if any, is restored
ctxt.db_connection = mock.sentinel.db_conn
ctxt.mq_connection = mock.sentinel.mq_conn
mapping = objects.CellMapping(database_connection='fake://',
transport_url='fake://',
uuid=uuids.cell)
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(cctxt.db_connection, mock.sentinel.cdb)
self.assertEqual(cctxt.mq_connection, mock.sentinel.cmq)
self.assertEqual(cctxt.cell_uuid, mapping.uuid)
self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn, ctxt.mq_connection)
self.assertIsNone(ctxt.cell_uuid)
# Test again now that we have populated the cache
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(cctxt.db_connection, mock.sentinel.cdb)
self.assertEqual(cctxt.mq_connection, mock.sentinel.cmq)
self.assertEqual(cctxt.cell_uuid, mapping.uuid)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell_unset(self, mock_create_ctxt_mgr, mock_rpc):
"""Tests that passing None as the mapping will temporarily
untarget any previously set cell context.
"""
mock_create_ctxt_mgr.return_value = mock.sentinel.cdb
mock_rpc.return_value = mock.sentinel.cmq
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
ctxt.db_connection = mock.sentinel.db_conn
ctxt.mq_connection = mock.sentinel.mq_conn
with context.target_cell(ctxt, None) as cctxt:
self.assertIsNone(cctxt.db_connection)
self.assertIsNone(cctxt.mq_connection)
self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn, ctxt.mq_connection)
@mock.patch('nova.context.set_target_cell')
def test_target_cell_regenerates(self, mock_set):
ctxt = context.RequestContext('fake', 'fake')
# Set a non-tracked property on the context to make sure it
# does not make it to the targeted one (like a copy would do)
ctxt.sentinel = mock.sentinel.parent
with context.target_cell(ctxt, mock.sentinel.cm) as cctxt:
# Should be a different object
self.assertIsNot(cctxt, ctxt)
# Should not have inherited the non-tracked property
self.assertFalse(hasattr(cctxt, 'sentinel'),
'Targeted context was copied from original')
# Set another non-tracked property
cctxt.sentinel = mock.sentinel.child
# Make sure we didn't pollute the original context
self.assertNotEqual(ctxt.sentinel, mock.sentinel.child)
def test_get_context(self):
ctxt = context.get_context()
self.assertIsNone(ctxt.user_id)
self.assertIsNone(ctxt.project_id)
self.assertFalse(ctxt.is_admin)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell_caching(self, mock_create_cm, mock_create_tport):
mock_create_cm.return_value = mock.sentinel.db_conn_obj
mock_create_tport.return_value = mock.sentinel.mq_conn_obj
ctxt = context.get_context()
mapping = objects.CellMapping(database_connection='fake://db',
transport_url='fake://mq',
uuid=uuids.cell)
# First call should create new connection objects.
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(mock.sentinel.db_conn_obj, cctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn_obj, cctxt.mq_connection)
mock_create_cm.assert_called_once_with('fake://db')
mock_create_tport.assert_called_once_with('fake://mq')
# Second call should use cached objects.
mock_create_cm.reset_mock()
mock_create_tport.reset_mock()
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(mock.sentinel.db_conn_obj, cctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn_obj, cctxt.mq_connection)
mock_create_cm.assert_not_called()
mock_create_tport.assert_not_called()
def test_is_cell_failure_sentinel(self):
record = context.did_not_respond_sentinel
self.assertTrue(context.is_cell_failure_sentinel(record))
record = TypeError()
self.assertTrue(context.is_cell_failure_sentinel(record))
record = objects.Instance()
self.assertFalse(context.is_cell_failure_sentinel(record))
@mock.patch('nova.context.target_cell')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells(self, mock_get_inst, mock_target_cell):
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
ctxt = context.get_context()
mapping = objects.CellMapping(database_connection='fake://db',
transport_url='fake://mq',
uuid=uuids.cell)
mappings = objects.CellMappingList(objects=[mapping])
filters = {'deleted': False}
context.scatter_gather_cells(
ctxt, mappings, 60, objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
mock_get_inst.assert_called_once_with(
mock_target_cell.return_value.__enter__.return_value, filters,
sort_dir='foo')
@mock.patch('nova.context.LOG.warning')
@mock.patch('eventlet.timeout.Timeout')
@mock.patch('eventlet.queue.LightQueue.get')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells_timeout(self, mock_get_inst,
mock_get_result, mock_timeout,
mock_log_warning):
# This is needed because we're mocking get_by_filters.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mappings = objects.CellMappingList(objects=[mapping0, mapping1])
# Simulate cell1 not responding.
mock_get_result.side_effect = [(mapping0.uuid,
mock.sentinel.instances),
exception.CellTimeout()]
results = context.scatter_gather_cells(
ctxt, mappings, 30, objects.InstanceList.get_by_filters)
self.assertEqual(2, len(results))
self.assertIn(mock.sentinel.instances, results.values())
self.assertIn(context.did_not_respond_sentinel, results.values())
mock_timeout.assert_called_once_with(30, exception.CellTimeout)
self.assertTrue(mock_log_warning.called)
@mock.patch('nova.context.LOG.exception')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells_exception(self, mock_get_inst,
mock_log_exception):
# This is needed because we're mocking get_by_filters.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mappings = objects.CellMappingList(objects=[mapping0, mapping1])
# Simulate cell1 raising an exception.
mock_get_inst.side_effect = [mock.sentinel.instances,
test.TestingException()]
filters = {'deleted': False}
results = context.scatter_gather_cells(
ctxt, mappings, 30, objects.InstanceList.get_by_filters, filters)
self.assertEqual(2, len(results))
self.assertIn(mock.sentinel.instances, results.values())
self.assertIsInstance(results[mapping1.uuid], Exception)
# non-NovaException gets logged
self.assertTrue(mock_log_exception.called)
# Now run it again with a NovaException to see it's not logged.
mock_log_exception.reset_mock()
mock_get_inst.side_effect = [mock.sentinel.instances,
exception.NotFound()]
results = context.scatter_gather_cells(
ctxt, mappings, 30, objects.InstanceList.get_by_filters, filters)
self.assertEqual(2, len(results))
self.assertIn(mock.sentinel.instances, results.values())
self.assertIsInstance(results[mapping1.uuid], exception.NovaException)
# NovaExceptions are not logged, the caller should handle them.
mock_log_exception.assert_not_called()
@mock.patch('nova.context.scatter_gather_cells')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_scatter_gather_all_cells(self, mock_get_all, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mock_get_all.return_value = objects.CellMappingList(
objects=[mapping0, mapping1])
filters = {'deleted': False}
context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, mock_get_all.return_value, 60,
objects.InstanceList.get_by_filters, filters, sort_dir='foo')
@mock.patch('nova.context.scatter_gather_cells')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_scatter_gather_skip_cell0(self, mock_get_all, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mock_get_all.return_value = objects.CellMappingList(
objects=[mapping0, mapping1])
filters = {'deleted': False}
context.scatter_gather_skip_cell0(
ctxt, objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, [mapping1], 60, objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
@mock.patch('nova.context.scatter_gather_cells')
def test_scatter_gather_single_cell(self, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
filters = {'deleted': False}
context.scatter_gather_single_cell(ctxt, mapping0,
objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, [mapping0], context.CELL_TIMEOUT,
objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
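# A minimal sketch (not part of the test suite) of the pattern the scatter/gather
# tests above exercise: fan a DB query out to every cell and keep only the cells
# that answered, skipping the timeout/exception sentinels.
def _example_gather_live_results(ctxt):
    results = context.scatter_gather_all_cells(
        ctxt, objects.InstanceList.get_by_filters, {'deleted': False})
    return {cell_uuid: result for cell_uuid, result in results.items()
            if not context.is_cell_failure_sentinel(result)}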
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
from typing import Dict, List, Union
from pyflink.common import typeinfo
from pyflink.common.serialization import DeserializationSchema, Encoder, SerializationSchema
from pyflink.common.typeinfo import RowTypeInfo
from pyflink.datastream.functions import SourceFunction, SinkFunction
from pyflink.java_gateway import get_gateway
from pyflink.util.utils import load_java_class, to_jarray
from py4j.java_gateway import java_import
class FlinkKafkaConsumerBase(SourceFunction, abc.ABC):
"""
Base class of all Flink Kafka Consumer data sources. This implements the common behavior across
all kafka versions.
The Kafka version specific behavior is defined mainly in the specific subclasses.
"""
def __init__(self, j_flink_kafka_consumer):
super(FlinkKafkaConsumerBase, self).__init__(source_func=j_flink_kafka_consumer)
def set_commit_offsets_on_checkpoints(self, commit_on_checkpoints: bool):
"""
Specifies whether or not the consumer should commit offsets back to kafka on checkpoints.
This setting will only have effect if checkpointing is enabled for the job. If checkpointing
isn't enabled, only the "auto.commit.enable" (for 0.8) / "enable.auto.commit" (for 0.9+)
property settings will be used.
"""
self._j_function = self._j_function \
.setCommitOffsetsOnCheckpoints(commit_on_checkpoints)
return self
def set_start_from_earliest(self):
"""
Specifies the consumer to start reading from the earliest offset for all partitions. This
lets the consumer ignore any committed group offsets in Zookeeper / Kafka brokers.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
"""
self._j_function = self._j_function.setStartFromEarliest()
return self
def set_start_from_latest(self):
"""
Specifies the consumer to start reading from the latest offset for all partitions. This lets
the consumer ignore any committed group offsets in Zookeeper / Kafka brokers.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
"""
self._j_function = self._j_function.setStartFromLatest()
return self
def set_start_from_timestamp(self, startup_offsets_timestamp: int):
"""
Specifies the consumer to start reading partitions from a specified timestamp. The specified
timestamp must be before the current timestamp. This lets the consumer ignore any committed
group offsets in Zookeeper / Kafka brokers.
The consumer will look up the earliest offset whose timestamp is greater than or equal to
the specific timestamp from Kafka. If there's no such offset, the consumer will use the
latest offset to read data from Kafka.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
:param startup_offsets_timestamp: timestamp for the startup offsets, as milliseconds for
epoch.
"""
self._j_function = self._j_function.setStartFromTimestamp(
startup_offsets_timestamp)
return self
def set_start_from_group_offsets(self):
"""
Specifies the consumer to start reading from any committed group offsets found in Zookeeper/
Kafka brokers. The 'group.id' property must be set in the configuration properties. If no
offset can be found for a partition, the behaviour in 'auto.offset.reset' set in the
configuration properties will be used for the partition.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
"""
self._j_function = self._j_function.setStartFromGroupOffsets()
return self
def disable_filter_restored_partitions_with_subscribed_topics(self):
"""
By default, when restoring from a checkpoint / savepoint, the consumer always ignores
restored partitions that are no longer associated with the current specified topics or topic
pattern to subscribe to.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
"""
self._j_function = self._j_function \
.disableFilterRestoredPartitionsWithSubscribedTopics()
return self
def get_produced_type(self):
return typeinfo._from_java_type(self._j_function.getProducedType())
class FlinkKafkaConsumer010(FlinkKafkaConsumerBase):
"""
The Flink Kafka Consumer is a streaming data source that pulls a parallel data stream from
Apache Kafka 0.10.x. The consumer can run in multiple parallel instances, each of which will
pull data from one or more Kafka partitions.
The Flink Kafka Consumer participates in checkpointing and guarantees that no data is lost
during a failure, and that the computation processes elements exactly once. (These guarantees
naturally assume that Kafka itself does not lose any data.)
Please note that Flink snapshots the offsets internally as part of its distributed checkpoints.
The offsets committed to Kafka / Zookeeper are only to bring the outside view of progress in
sync with Flink's view of the progress. That way, monitoring and other jobs can get a view of
how far the Flink Kafka consumer has consumed a topic.
Please refer to Kafka's documentation for the available configuration properties:
http://kafka.apache.org/documentation.html#newconsumerconfigs
"""
def __init__(self, topics: Union[str, List[str]], deserialization_schema: DeserializationSchema,
properties: Dict):
"""
Creates a new Kafka streaming source consumer for Kafka 0.10.x.
This constructor allows passing multiple topics to the consumer.
:param topics: The Kafka topics to read from.
:param deserialization_schema: The de-/serializer used to convert between Kafka's byte
messages and Flink's objects.
:param properties: The properties that are used to configure both the fetcher and the offset
handler.
"""
JFlinkKafkaConsumer010 = get_gateway().jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010
j_flink_kafka_consumer_010 = _get_kafka_consumer(topics, properties, deserialization_schema,
JFlinkKafkaConsumer010)
super(FlinkKafkaConsumer010, self).__init__(
j_flink_kafka_consumer=j_flink_kafka_consumer_010)
class FlinkKafkaConsumer011(FlinkKafkaConsumerBase):
"""
The Flink Kafka Consumer is a streaming data source that pulls a parallel data stream from
Apache Kafka 0.11.x. The consumer can run in multiple parallel instances, each of which will
pull data from one or more Kafka partitions.
The Flink Kafka Consumer participates in checkpointing and guarantees that no data is lost
during a failure, and that the computation processes elements exactly once. (These guarantees
naturally assume that Kafka itself does not lose any data.)
Please note that Flink snapshots the offsets internally as part of its distributed checkpoints.
The offsets committed to Kafka / Zookeeper are only to bring the outside view of progress in
sync with Flink's view of the progress. That way, monitoring and other jobs can get a view of
how far the Flink Kafka consumer has consumed a topic.
Please refer to Kafka's documentation for the available configuration properties:
http://kafka.apache.org/documentation.html#newconsumerconfigs
"""
def __init__(self, topics: Union[str, List[str]], deserialization_schema: DeserializationSchema,
properties: Dict):
"""
Creates a new Kafka streaming source consumer for Kafka 0.11.x.
This constructor allows passing multiple topics to the consumer.
:param topics: The Kafka topics to read from.
:param deserialization_schema: The de-/serializer used to convert between Kafka's byte
messages and Flink's objects.
:param properties: The properties that are used to configure both the fetcher and the offset
handler.
"""
JFlinkKafkaConsumer011 = get_gateway().jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
j_flink_kafka_consumer_011 = _get_kafka_consumer(topics, properties, deserialization_schema,
JFlinkKafkaConsumer011)
super(FlinkKafkaConsumer011, self).__init__(j_flink_kafka_consumer_011)
class FlinkKafkaConsumer(FlinkKafkaConsumerBase):
"""
The Flink Kafka Consumer is a streaming data source that pulls a parallel data stream from
Apache Kafka 0.10.x or later. The consumer can run in multiple parallel instances, each of which will
pull data from one or more Kafka partitions.
The Flink Kafka Consumer participates in checkpointing and guarantees that no data is lost
during a failure, and that the computation processes elements exactly once. (These guarantees
naturally assume that Kafka itself does not lose any data.)
Please note that Flink snapshots the offsets internally as part of its distributed checkpoints.
The offsets committed to Kafka / Zookeeper are only to bring the outside view of progress in
sync with Flink's view of the progress. That way, monitoring and other jobs can get a view of
how far the Flink Kafka consumer has consumed a topic.
Please refer to Kafka's documentation for the available configuration properties:
http://kafka.apache.org/documentation.html#newconsumerconfigs
"""
def __init__(self, topics: Union[str, List[str]], deserialization_schema: DeserializationSchema,
properties: Dict):
"""
Creates a new Kafka streaming source consumer for Kafka 0.10.x or later.
This constructor allows passing multiple topics to the consumer.
:param topics: The Kafka topics to read from.
:param deserialization_schema: The de-/serializer used to convert between Kafka's byte
messages and Flink's objects.
:param properties: The properties that are used to configure both the fetcher and the offset
handler.
"""
JFlinkKafkaConsumer = get_gateway().jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
j_flink_kafka_consumer = _get_kafka_consumer(topics, properties, deserialization_schema,
JFlinkKafkaConsumer)
super(FlinkKafkaConsumer, self).__init__(j_flink_kafka_consumer=j_flink_kafka_consumer)
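# A short usage sketch (assumptions: a broker at localhost:9092, a topic named
# "my-topic" and SimpleStringSchema from pyflink.common.serialization; none of
# these are defined elsewhere in this module).
def _example_build_kafka_consumer():
    from pyflink.common.serialization import SimpleStringSchema
    props = {'bootstrap.servers': 'localhost:9092', 'group.id': 'example-group'}
    consumer = FlinkKafkaConsumer('my-topic', SimpleStringSchema(), props)
    # start from the earliest offsets unless the job is restored from a checkpoint
    consumer.set_start_from_earliest()
    return consumer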
class FlinkKafkaProducerBase(SinkFunction, abc.ABC):
"""
Flink Sink to produce data into a Kafka topic.
Please note that this producer provides at-least-once reliability guarantees when checkpoints
are enabled and set_flush_on_checkpoint(True) is set. Otherwise, the producer doesn't provide any
reliability guarantees.
"""
def __init__(self, j_flink_kafka_producer):
super(FlinkKafkaProducerBase, self).__init__(sink_func=j_flink_kafka_producer)
def set_log_failures_only(self, log_failures_only: bool):
"""
Defines whether the producer should fail on errors, or only log them. If this is set to
true, then exceptions will only be logged; if set to false, exceptions will eventually be
thrown and cause the streaming program to fail (and enter recovery).
:param log_failures_only: The flag to indicate logging-only on exceptions.
"""
self._j_function.setLogFailuresOnly(log_failures_only)
def set_flush_on_checkpoint(self, flush_on_checkpoint: bool):
"""
If set to true, the Flink producer will wait for all outstanding messages in the Kafka
buffers to be acknowledged by the Kafka producer on a checkpoint.
This way, the producer can guarantee that messages in the Kafka buffers are part of the
checkpoint.
:param flush_on_checkpoint: Flag indicating the flush mode (true = flush on checkpoint)
"""
self._j_function.setFlushOnCheckpoint(flush_on_checkpoint)
def set_write_timestamp_to_kafka(self, write_timestamp_to_kafka: bool):
"""
If set to true, Flink will write the (event time) timestamp attached to each record into
Kafka. Timestamps must be positive for Kafka to accept them.
:param write_timestamp_to_kafka: Flag indicating if Flink's internal timestamps are written
to Kafka.
"""
self._j_function.setWriteTimestampToKafka(write_timestamp_to_kafka)
class FlinkKafkaProducer010(FlinkKafkaProducerBase):
"""
Flink Sink to produce data into a Kafka topic. This producer is compatible with Kafka 0.10.x.
"""
def __init__(self, topic: str, serialization_schema: SerializationSchema,
producer_config: Dict):
"""
Creates a FlinkKafkaProducer for a given topic. The sink produces a DataStream to the topic.
Using this constructor, the default FlinkFixedPartitioner will be used as the partitioner.
This default partitioner maps each sink subtask to a single Kafka partition (i.e. all
records received by a sink subtask will end up in the same Kafka partition).
:param topic: ID of the Kafka topic.
:param serialization_schema: User defined key-less serialization schema.
:param producer_config: Properties with the producer configuration.
"""
gateway = get_gateway()
j_properties = gateway.jvm.java.util.Properties()
for key, value in producer_config.items():
j_properties.setProperty(key, value)
JFlinkKafkaProducer010 = gateway.jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010
j_flink_kafka_producer = JFlinkKafkaProducer010(
topic, serialization_schema._j_serialization_schema, j_properties)
super(FlinkKafkaProducer010, self).__init__(j_flink_kafka_producer=j_flink_kafka_producer)
class Semantic(object):
"""
Semantics that can be chosen.
:data: `EXACTLY_ONCE`:
The Flink producer will write all messages in a Kafka transaction that will be committed to
Kafka on a checkpoint. In this mode FlinkKafkaProducer011 sets up a pool of
FlinkKafkaProducers. A new Kafka transaction is created between each checkpoint and is
committed on FlinkKafkaProducer011.notifyCheckpointComplete(long). If checkpoint complete
notifications are running late, FlinkKafkaProducer011 can run out of FlinkKafkaProducers in
the pool. In that case any subsequent FlinkKafkaProducer011.snapshotState() requests will
fail and FlinkKafkaProducer011 will keep using the FlinkKafkaProducer from the previous
checkpoint. To decrease the chances of failing checkpoints there are four options:
1. decrease the number of max concurrent checkpoints
2. make checkpoints more reliable (so that they complete faster)
3. increase the delay between checkpoints
4. increase the size of the FlinkKafkaProducers pool
:data: `AT_LEAST_ONCE`:
The Flink producer will wait for all outstanding messages in the Kafka buffers to be
acknowledged by the Kafka producer on a checkpoint.
:data: `NONE`:
Means that nothing will be guaranteed. Messages can be lost and/or duplicated in case of
failure.
"""
EXACTLY_ONCE = 0
AT_LEAST_ONCE = 1
NONE = 2
@staticmethod
def _to_j_semantic(semantic, j_semantic):
if semantic == Semantic.EXACTLY_ONCE:
return j_semantic.EXACTLY_ONCE
elif semantic == Semantic.AT_LEAST_ONCE:
return j_semantic.AT_LEAST_ONCE
elif semantic == Semantic.NONE:
return j_semantic.NONE
else:
raise TypeError("Unsupported semantic: %s, supported semantics are: "
"Semantic.EXACTLY_ONCE, Semantic.AT_LEAST_ONCE, Semantic.NONE"
% semantic)
class FlinkKafkaProducer011(FlinkKafkaProducerBase):
"""
Flink Sink to produce data into a Kafka topic. This producer is compatible with Kafka 0.11.x. By
default the producer will use the AT_LEAST_ONCE semantic. Before using EXACTLY_ONCE please refer to
Flink's Kafka connector documentation.
"""
def __init__(self, topic: str, serialization_schema: SerializationSchema,
producer_config: Dict, kafka_producer_pool_size: int = 5,
semantic: Semantic = Semantic.AT_LEAST_ONCE):
"""
Creates a FlinkKafkaProducer for a given topic. The sink produces a DataStream to the topic.
Using this constructor, the default FlinkFixedPartitioner will be used as the partitioner.
This default partitioner maps each sink subtask to a single Kafka partition (i.e. all
records received by a sink subtask will end up in the same Kafka partition).
:param topic: ID of the Kafka topic.
:param serialization_schema: User defined key-less serialization schema.
:param producer_config: Properties with the producer configuration.
:param kafka_producer_pool_size: Overwrite the default size (5) of the internal KafkaProducers pool.
:param semantic: Defines the semantic that will be used by this producer (defaults to AT_LEAST_ONCE).
"""
gateway = get_gateway()
j_properties = gateway.jvm.java.util.Properties()
for key, value in producer_config.items():
j_properties.setProperty(key, value)
JFlinkKafkaProducer011 = gateway.jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011
JFlinkFixedPartitioner = gateway.jvm\
.org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner
JSemantic = get_gateway().jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011.Semantic
j_keyed_serialization_schema = gateway.jvm\
.org.apache.flink.streaming.connectors.kafka.internals\
.KeyedSerializationSchemaWrapper(serialization_schema._j_serialization_schema)
j_flink_kafka_producer = JFlinkKafkaProducer011(
topic, j_keyed_serialization_schema, j_properties,
gateway.jvm.java.util.Optional.of(JFlinkFixedPartitioner()),
Semantic._to_j_semantic(semantic, JSemantic), kafka_producer_pool_size)
super(FlinkKafkaProducer011, self).__init__(j_flink_kafka_producer=j_flink_kafka_producer)
def ignore_failures_after_transaction_timeout(self) -> 'FlinkKafkaProducer011':
"""
Disables the propagation of exceptions thrown when committing presumably timed out Kafka
transactions during recovery of the job. If a Kafka transaction is timed out, a commit will
never be successful. Hence, use this feature to avoid recovery loops of the Job. Exceptions
will still be logged to inform the user that data loss might have occurred.
Note that we use the System.currentTimeMillis() to track the age of a transaction. Moreover,
only exceptions thrown during the recovery are caught, i.e., the producer will attempt at
least one commit of the transaction before giving up.
:return: This FlinkKafkaProducer.
"""
self._j_function.ignoreFailuresAfterTransactionTimeout()
return self
class FlinkKafkaProducer(FlinkKafkaProducerBase):
"""
Flink Sink to produce data into a Kafka topic. This producer is compatible with Kafka 0.11.x. By
default the producer will use the AT_LEAST_ONCE semantic. Before using EXACTLY_ONCE please refer to
Flink's Kafka connector documentation.
"""
def __init__(self, topic: str, serialization_schema: SerializationSchema,
producer_config: Dict, kafka_producer_pool_size: int = 5,
semantic: Semantic = Semantic.AT_LEAST_ONCE):
"""
Creates a FlinkKafkaProducer for a given topic. The sink produces a DataStream to the topic.
Using this constructor, the default FlinkFixedPartitioner will be used as the partitioner.
This default partitioner maps each sink subtask to a single Kafka partition (i.e. all
records received by a sink subtask will end up in the same Kafka partition).
:param topic: ID of the Kafka topic.
:param serialization_schema: User defined key-less serialization schema.
:param producer_config: Properties with the producer configuration.
:param kafka_producer_pool_size: Overwrite the default size (5) of the internal KafkaProducers pool.
:param semantic: Defines the semantic that will be used by this producer (defaults to AT_LEAST_ONCE).
"""
gateway = get_gateway()
j_properties = gateway.jvm.java.util.Properties()
for key, value in producer_config.items():
j_properties.setProperty(key, value)
JFlinkKafkaProducer = gateway.jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer
JSemantic = get_gateway().jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.Semantic
j_flink_kafka_producer = JFlinkKafkaProducer(
topic, serialization_schema._j_serialization_schema, j_properties, None,
Semantic._to_j_semantic(semantic, JSemantic), kafka_producer_pool_size)
super(FlinkKafkaProducer, self).__init__(j_flink_kafka_producer=j_flink_kafka_producer)
def ignore_failures_after_transaction_timeout(self) -> 'FlinkKafkaProducer':
"""
Disables the propagation of exceptions thrown when committing presumably timed out Kafka
transactions during recovery of the job. If a Kafka transaction is timed out, a commit will
never be successful. Hence, use this feature to avoid recovery loops of the Job. Exceptions
will still be logged to inform the user that data loss might have occurred.
Note that we use the System.currentTimeMillis() to track the age of a transaction. Moreover,
only exceptions thrown during the recovery are caught, i.e., the producer will attempt at
least one commit of the transaction before giving up.
:return: This FlinkKafkaProducer.
"""
self._j_function.ignoreFailuresAfterTransactionTimeout()
return self
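# A short usage sketch for the universal producer (assumptions: the same example
# broker and topic names as in the consumer sketch above; SimpleStringSchema is
# used as a plain value serializer).
def _example_build_kafka_producer():
    from pyflink.common.serialization import SimpleStringSchema
    props = {'bootstrap.servers': 'localhost:9092'}
    producer = FlinkKafkaProducer('my-topic', SimpleStringSchema(), props,
                                  kafka_producer_pool_size=5,
                                  semantic=Semantic.AT_LEAST_ONCE)
    # only log write failures instead of failing the streaming job
    producer.set_log_failures_only(True)
    return producer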
def _get_kafka_consumer(topics, properties, deserialization_schema, j_consumer_clz):
if not isinstance(topics, list):
topics = [topics]
gateway = get_gateway()
j_properties = gateway.jvm.java.util.Properties()
for key, value in properties.items():
j_properties.setProperty(key, value)
j_flink_kafka_consumer = j_consumer_clz(topics,
deserialization_schema._j_deserialization_schema,
j_properties)
return j_flink_kafka_consumer
class JdbcSink(SinkFunction):
def __init__(self, j_jdbc_sink):
super(JdbcSink, self).__init__(sink_func=j_jdbc_sink)
@staticmethod
def sink(sql: str, type_info: RowTypeInfo, jdbc_connection_options: 'JdbcConnectionOptions',
jdbc_execution_options: 'JdbcExecutionOptions' = None):
"""
Create a JDBC sink.
:param sql: arbitrary DML query (e.g. insert, update, upsert)
:param type_info: A RowTypeInfo for query field types.
:param jdbc_execution_options: parameters of execution, such as batch size and maximum
retries.
:param jdbc_connection_options: parameters of connection, such as JDBC URL.
:return: A JdbcSink.
"""
sql_types = []
gateway = get_gateway()
JJdbcTypeUtil = gateway.jvm.org.apache.flink.connector.jdbc.utils.JdbcTypeUtil
for field_type in type_info.get_field_types():
sql_types.append(JJdbcTypeUtil
.typeInformationToSqlType(field_type.get_java_type_info()))
j_sql_type = to_jarray(gateway.jvm.int, sql_types)
output_format_clz = gateway.jvm.Class\
.forName('org.apache.flink.connector.jdbc.internal.JdbcBatchingOutputFormat', False,
get_gateway().jvm.Thread.currentThread().getContextClassLoader())
j_int_array_type = to_jarray(gateway.jvm.int, []).getClass()
j_builder_method = output_format_clz.getDeclaredMethod('createRowJdbcStatementBuilder',
to_jarray(gateway.jvm.Class,
[j_int_array_type]))
j_builder_method.setAccessible(True)
j_statement_builder = j_builder_method.invoke(None, to_jarray(gateway.jvm.Object,
[j_sql_type]))
jdbc_execution_options = jdbc_execution_options if jdbc_execution_options is not None \
else JdbcExecutionOptions.defaults()
j_jdbc_sink = gateway.jvm.org.apache.flink.connector.jdbc.JdbcSink\
.sink(sql, j_statement_builder, jdbc_execution_options._j_jdbc_execution_options,
jdbc_connection_options._j_jdbc_connection_options)
return JdbcSink(j_jdbc_sink=j_jdbc_sink)
class JdbcConnectionOptions(object):
"""
JDBC connection options.
"""
def __init__(self, j_jdbc_connection_options):
self._j_jdbc_connection_options = j_jdbc_connection_options
def get_db_url(self) -> str:
return self._j_jdbc_connection_options.getDbURL()
def get_driver_name(self) -> str:
return self._j_jdbc_connection_options.getDriverName()
def get_user_name(self) -> str:
return self._j_jdbc_connection_options.getUsername()
def get_password(self) -> str:
return self._j_jdbc_connection_options.getPassword()
class JdbcConnectionOptionsBuilder(object):
"""
Builder for JdbcConnectionOptions.
"""
def __init__(self):
self._j_options_builder = get_gateway().jvm.org.apache.flink.connector\
.jdbc.JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
def with_url(self, url: str) -> 'JdbcConnectionOptions.JdbcConnectionOptionsBuilder':
self._j_options_builder.withUrl(url)
return self
def with_driver_name(self, driver_name: str) \
-> 'JdbcConnectionOptions.JdbcConnectionOptionsBuilder':
self._j_options_builder.withDriverName(driver_name)
return self
def with_user_name(self, user_name: str) \
-> 'JdbcConnectionOptions.JdbcConnectionOptionsBuilder':
self._j_options_builder.withUsername(user_name)
return self
def with_password(self, password: str) \
-> 'JdbcConnectionOptions.JdbcConnectionOptionsBuilder':
self._j_options_builder.withPassword(password)
return self
def build(self) -> 'JdbcConnectionOptions':
return JdbcConnectionOptions(j_jdbc_connection_options=self._j_options_builder.build())
class JdbcExecutionOptions(object):
"""
JDBC sink batch options.
"""
def __init__(self, j_jdbc_execution_options):
self._j_jdbc_execution_options = j_jdbc_execution_options
def get_batch_interval_ms(self) -> int:
return self._j_jdbc_execution_options.getBatchIntervalMs()
def get_batch_size(self) -> int:
return self._j_jdbc_execution_options.getBatchSize()
def get_max_retries(self) -> int:
return self._j_jdbc_execution_options.getMaxRetries()
@staticmethod
def defaults() -> 'JdbcExecutionOptions':
return JdbcExecutionOptions(
j_jdbc_execution_options=get_gateway().jvm
.org.apache.flink.connector.jdbc.JdbcExecutionOptions.defaults())
@staticmethod
def builder() -> 'Builder':
return JdbcExecutionOptions.Builder()
class Builder(object):
"""
Builder for JdbcExecutionOptions.
"""
def __init__(self):
self._j_builder = get_gateway().jvm\
.org.apache.flink.connector.jdbc.JdbcExecutionOptions.builder()
def with_batch_size(self, size: int) -> 'JdbcExecutionOptions.Builder':
self._j_builder.withBatchSize(size)
return self
def with_batch_interval_ms(self, interval_ms: int) -> 'JdbcExecutionOptions.Builder':
self._j_builder.withBatchIntervalMs(interval_ms)
return self
def with_max_retries(self, max_retries: int) -> 'JdbcExecutionOptions.Builder':
self._j_builder.withMaxRetries(max_retries)
return self
def build(self) -> 'JdbcExecutionOptions':
return JdbcExecutionOptions(j_jdbc_execution_options=self._j_builder.build())
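# A short wiring sketch (assumptions: the Derby in-memory URL and the two-column
# table named "t" are example values; Types.ROW from pyflink.common.typeinfo
# supplies the RowTypeInfo expected by JdbcSink.sink).
def _example_build_jdbc_sink():
    from pyflink.common.typeinfo import Types
    type_info = Types.ROW([Types.INT(), Types.STRING()])
    connection_options = JdbcConnectionOptions.JdbcConnectionOptionsBuilder() \
        .with_url('jdbc:derby:memory:example;create=true') \
        .with_driver_name('org.apache.derby.jdbc.EmbeddedDriver') \
        .build()
    execution_options = JdbcExecutionOptions.builder() \
        .with_batch_size(200) \
        .with_max_retries(3) \
        .build()
    return JdbcSink.sink('insert into t (id, name) values (?, ?)',
                         type_info, connection_options, execution_options)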
class RollingPolicy(object):
"""
The policy based on which a `Bucket` in the `StreamingFileSink`
rolls its currently open part file and opens a new one.
"""
def __init__(self, j_policy):
self.j_policy = j_policy
class DefaultRollingPolicy(RollingPolicy):
"""
The default implementation of the `RollingPolicy`.
"""
def __init__(self, j_policy):
super(DefaultRollingPolicy, self).__init__(j_policy)
@staticmethod
def builder() -> 'DefaultRollingPolicy.PolicyBuilder':
"""
Creates a new `PolicyBuilder` that is used to configure and build
an instance of `DefaultRollingPolicy`.
"""
return DefaultRollingPolicy.PolicyBuilder()
class PolicyBuilder(object):
"""
A helper class that holds the configuration properties for the `DefaultRollingPolicy`.
The `PolicyBuilder.build()` method must be called to instantiate the policy.
"""
def __init__(self):
self.part_size = 1024 * 1024 * 128
self.rollover_interval = 60 * 1000
self.inactivity_interval = 60 * 1000
def with_max_part_size(self, size: int) -> 'DefaultRollingPolicy.PolicyBuilder':
"""
Sets the part size above which a part file will have to roll.
:param size: the allowed part size.
"""
assert size > 0
self.part_size = size
return self
def with_inactivity_interval(self, interval: int) -> 'DefaultRollingPolicy.PolicyBuilder':
"""
Sets the interval of allowed inactivity after which a part file will have to roll.
:param interval: the allowed inactivity interval.
"""
assert interval > 0
self.inactivity_interval = interval
return self
def with_rollover_interval(self, interval) -> 'DefaultRollingPolicy.PolicyBuilder':
"""
Sets the max time a part file can stay open before having to roll.
:param interval: the desired rollover interval.
"""
self.rollover_interval = interval
return self
def build(self) -> 'DefaultRollingPolicy':
"""
Creates the actual policy.
"""
j_builder = get_gateway().jvm.org.apache.flink.streaming.api.\
functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy.create()
j_builder = j_builder.withMaxPartSize(self.part_size)
j_builder = j_builder.withInactivityInterval(self.inactivity_interval)
j_builder = j_builder.withRolloverInterval(self.rollover_interval)
return DefaultRollingPolicy(j_builder.build())
class StreamingFileSink(SinkFunction):
"""
Sink that emits its input elements to `FileSystem` files within buckets. This is
integrated with the checkpointing mechanism to provide exactly once semantics.
When creating the sink a `basePath` must be specified. The base directory contains
one directory for every bucket. The bucket directories themselves contain several part files,
with at least one for each parallel subtask of the sink which is writing data to that bucket.
These part files contain the actual output data.
"""
def __init__(self, j_obj):
super(StreamingFileSink, self).__init__(j_obj)
class DefaultRowFormatBuilder(object):
"""
Builder for the vanilla `StreamingFileSink` using a row format.
"""
def __init__(self, j_default_row_format_builder):
self.j_default_row_format_builder = j_default_row_format_builder
def with_bucket_check_interval(
self, interval: int) -> 'StreamingFileSink.DefaultRowFormatBuilder':
self.j_default_row_format_builder.withBucketCheckInterval(interval)
return self
def with_bucket_assigner(
self,
assigner_class_name: str) -> 'StreamingFileSink.DefaultRowFormatBuilder':
gateway = get_gateway()
java_import(gateway.jvm, assigner_class_name)
j_record_class = load_java_class(assigner_class_name)
self.j_default_row_format_builder.withBucketAssigner(j_record_class)
return self
def with_rolling_policy(
self,
policy: RollingPolicy) -> 'StreamingFileSink.DefaultRowFormatBuilder':
self.j_default_row_format_builder.withRollingPolicy(policy.j_policy)
return self
def with_output_file_config(
self,
output_file_config: 'OutputFileConfig') \
-> 'StreamingFileSink.DefaultRowFormatBuilder':
self.j_default_row_format_builder.withOutputFileConfig(output_file_config.j_obj)
return self
def build(self) -> 'StreamingFileSink':
j_stream_file_sink = self.j_default_row_format_builder.build()
return StreamingFileSink(j_stream_file_sink)
@staticmethod
def for_row_format(base_path: str, encoder: Encoder) -> 'DefaultRowFormatBuilder':
j_path = get_gateway().jvm.org.apache.flink.core.fs.Path(base_path)
j_default_row_format_builder = get_gateway().jvm.org.apache.flink.streaming.api.\
functions.sink.filesystem.StreamingFileSink.forRowFormat(j_path, encoder.j_encoder)
return StreamingFileSink.DefaultRowFormatBuilder(j_default_row_format_builder)
class OutputFileConfig(object):
"""
Part file name configuration.
This allows defining a prefix and a suffix for the part file name.
"""
@staticmethod
def builder():
return OutputFileConfig.OutputFileConfigBuilder()
def __init__(self, part_prefix: str, part_suffix: str):
self.j_obj = get_gateway().jvm.org.apache.flink.streaming.api.\
functions.sink.filesystem.OutputFileConfig(part_prefix, part_suffix)
def get_part_prefix(self) -> str:
"""
The prefix for the part name.
"""
return self.j_obj.getPartPrefix()
def get_part_suffix(self) -> str:
"""
The suffix for the part name.
"""
return self.j_obj.getPartSuffix()
class OutputFileConfigBuilder(object):
"""
A builder to create the part file configuration.
"""
def __init__(self):
self.part_prefix = "part"
self.part_suffix = ""
def with_part_prefix(self, prefix) -> 'OutputFileConfig.OutputFileConfigBuilder':
self.part_prefix = prefix
return self
def with_part_suffix(self, suffix) -> 'OutputFileConfig.OutputFileConfigBuilder':
self.part_suffix = suffix
return self
def build(self) -> 'OutputFileConfig':
return OutputFileConfig(self.part_prefix, self.part_suffix)
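# A short end-to-end sketch for the row-format file sink (assumptions: the output
# path, part prefix/suffix and size/interval values are arbitrary examples;
# Encoder.simple_string_encoder() is used as the row encoder).
def _example_build_streaming_file_sink(base_path='/tmp/flink-output'):
    rolling_policy = DefaultRollingPolicy.builder() \
        .with_max_part_size(1024 * 1024 * 64) \
        .with_rollover_interval(10 * 60 * 1000) \
        .with_inactivity_interval(5 * 60 * 1000) \
        .build()
    file_config = OutputFileConfig.builder() \
        .with_part_prefix('events') \
        .with_part_suffix('.txt') \
        .build()
    return StreamingFileSink \
        .for_row_format(base_path, Encoder.simple_string_encoder()) \
        .with_rolling_policy(rolling_policy) \
        .with_output_file_config(file_config) \
        .build()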
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import metrics
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FalsePositivesTest(test.TestCase):
def test_config(self):
fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
self.assertEqual(fp_obj.name, 'my_fp')
self.assertEqual(len(fp_obj.variables), 1)
self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
self.assertEqual(fp_obj2.name, 'my_fp')
self.assertEqual(len(fp_obj2.variables), 1)
self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(14., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose([7., 4., 2.], result)
def test_weighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([125., 42., 12.], self.evaluate(result))
def test_threshold_limit(self):
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[-1, 2\]'):
metrics.FalsePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[None\]'):
metrics.FalsePositives(thresholds=[None])
@test_util.run_all_in_graph_and_eager_modes
class FalseNegativesTest(test.TestCase):
def test_config(self):
fn_obj = metrics.FalseNegatives(name='my_fn', thresholds=[0.4, 0.9])
self.assertEqual(fn_obj.name, 'my_fn')
self.assertEqual(len(fn_obj.variables), 1)
self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
self.assertEqual(fn_obj2.name, 'my_fn')
self.assertEqual(len(fn_obj2.variables), 1)
self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(5., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose([1., 4., 6.], result)
def test_weighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([4., 16., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TrueNegativesTest(test.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, 'my_tn')
self.assertEqual(len(tn_obj.variables), 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, 'my_tn')
self.assertEqual(len(tn_obj2.variables), 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose([2., 5., 7.], result)
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5., 15., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TruePositivesTest(test.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name='my_tp', thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, 'my_tp')
self.assertEqual(len(tp_obj.variables), 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, 'my_tp')
self.assertEqual(len(tp_obj2.variables), 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(12., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose([6., 3., 1.], result)
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
result = tp_obj(y_true, y_pred, sample_weight=37.)
self.assertAllClose([222., 111., 37.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class PrecisionTest(test.TestCase):
def test_config(self):
p_obj = metrics.Precision(
name='my_precision', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(p_obj.name, 'my_precision')
self.assertEqual(len(p_obj.variables), 2)
self.assertEqual([v.name for v in p_obj.variables],
['true_positives:0', 'false_positives:0'])
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
self.assertEqual(p_obj.top_k, 15)
self.assertEqual(p_obj.class_id, 12)
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, 'my_precision')
self.assertEqual(len(p_obj2.variables), 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
self.assertEqual(p_obj2.top_k, 15)
self.assertEqual(p_obj2.class_id, 12)
def test_value_is_idempotent(self):
p_obj = metrics.Precision(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = p_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(p_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(p_obj.result())
for _ in range(10):
self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
1e-3)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
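# Added note: both rows predict positives in columns 0 and 2; the true
# positives among them are (row 0, col 2) with weight 3 and (row 1, col 0)
# with weight 4, which gives the sums below.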
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred, sample_weight=weights)
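# Added note: at the 0.5 threshold the positive predictions are (0, 0) and
# (1, 0); only (1, 0) is a true positive (weight 3) while (0, 0) is a false
# positive (weight 4). No prediction exceeds the 1.0 threshold, so the second
# precision value is 0.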
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
1e-3)
def test_unweighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1. / 3, self.evaluate(result))
def test_weighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
self.evaluate(
p_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = p_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
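# Added note: with top_k=3 the predicted positives are indices {0, 2, 4} for
# the first batch and {0, 1, 2} for the second (the tie at 0.2 is broken
# toward the lowest index); the true positives among them carry weights
# (2 + 5) and (3 + 3).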
tp = (2 + 5) + (3 + 3)
predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
expected_precision = tp / predicted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_unweighted_class_id(self):
p_obj = metrics.Precision(class_id=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_class_id(self):
p_obj = metrics.Precision(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_threshold(self):
p_obj = metrics.Precision(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
@test_util.run_all_in_graph_and_eager_modes
class RecallTest(test.TestCase):
def test_config(self):
r_obj = metrics.Recall(
name='my_recall', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(r_obj.name, 'my_recall')
self.assertEqual(len(r_obj.variables), 2)
self.assertEqual([v.name for v in r_obj.variables],
['true_positives:0', 'false_negatives:0'])
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
self.assertEqual(r_obj.top_k, 15)
self.assertEqual(r_obj.class_id, 12)
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, 'my_recall')
self.assertEqual(len(r_obj2.variables), 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
self.assertEqual(r_obj2.top_k, 15)
self.assertEqual(r_obj2.class_id, 12)
def test_value_is_idempotent(self):
r_obj = metrics.Recall(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = r_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(r_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(r_obj.result())
for _ in range(10):
self.assertArrayNear(initial_recall, self.evaluate(r_obj.result()), 1e-3)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
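# Added note: the true positives are (row 0, col 2) and (row 1, col 3) with
# weights 3 and 1; the denominator sums the weights at every position where
# y_true is 1, as written out below.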
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred, sample_weight=weights)
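# Added note: at the 0.5 threshold the only true positive is (1, 0) with
# weight 3; the denominator sums the weights of the actual positive labels,
# 4 at (0, 1) and 3 at (1, 0). Nothing exceeds the 1.0 threshold, so the
# second recall value is 0.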
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(r_obj.result()),
1e-3)
def test_unweighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_weighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
self.evaluate(
r_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = r_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
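# Added note: the top-3 predictions are indices {0, 2, 4} for the first batch
# and {0, 1, 2} for the second; the true positives among them weigh (2 + 5)
# and (3 + 3), while the denominator sums the weights of every actual
# positive label in both batches.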
tp = (2 + 5) + (3 + 3)
positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
expected_recall = tp / positives
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_unweighted_class_id(self):
r_obj = metrics.Recall(class_id=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_class_id(self):
r_obj = metrics.Recall(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_threshold(self):
r_obj = metrics.Recall(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([1, 1, 1, 0, 1], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.25, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(3, self.evaluate(r_obj.false_negatives))
@test_util.run_all_in_graph_and_eager_modes
class SensitivityAtSpecificityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4, num_thresholds=100, name='sensitivity_at_specificity_1')
self.assertEqual(s_obj.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_sensitivity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.8, self.evaluate(result))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, self.evaluate(result))
def test_invalid_specificity(self):
with self.assertRaisesRegexp(
ValueError, r'`specificity` must be in the range \[0, 1\].'):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class SpecificityAtSensitivityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4, num_thresholds=100, name='specificity_at_sensitivity_1')
self.assertEqual(s_obj.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_specificity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_specificity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_invalid_sensitivity(self):
with self.assertRaisesRegexp(
ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class AUCTest(test.TestCase):
def setup(self):
self.num_thresholds = 3
self.y_pred = constant_op.constant([0, 0.5, 0.3, 0.9], dtype=dtypes.float32)
self.y_true = constant_op.constant([0, 0, 1, 1])
self.sample_weight = [1, 2, 3, 4]
# threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
# y_pred when threshold = 0 - 1e-7 : [1, 1, 1, 1]
# y_pred when threshold = 0.5 : [0, 0, 0, 1]
# y_pred when threshold = 1 + 1e-7 : [0, 0, 0, 0]
# without sample_weight:
# tp = np.sum([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]], axis=1)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# with sample_weight:
# tp = np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 3, 0], [0, 0, 3, 4]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 2, 0, 0], [1, 2, 0, 0]], axis=1)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
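# Added note: the count vectors above can be re-derived with plain numpy,
# e.g. np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1) yields the
# weighted tp vector [7, 4, 0]; each inner row is one threshold applied to
# y_pred.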
def test_config(self):
auc_obj = metrics.AUC(
num_thresholds=100,
curve=metrics_utils.AUCCurve.PR,
summation_method=metrics_utils.AUCSummationMethod.MAJORING,
name='auc_1')
self.assertEqual(auc_obj.name, 'auc_1')
self.assertEqual(len(auc_obj.variables), 4)
self.assertEqual(auc_obj.num_thresholds, 100)
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
# Check save and restore config
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
self.assertEqual(auc_obj2.name, 'auc_1')
self.assertEqual(len(auc_obj2.variables), 4)
self.assertEqual(auc_obj2.num_thresholds, 100)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj2.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
def test_value_is_idempotent(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=3)
self.evaluate(variables.variables_initializer(auc_obj.variables))
# Run several updates.
update_op = auc_obj.update_state(self.y_true, self.y_pred)
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_auc = self.evaluate(auc_obj.result())
for _ in range(10):
self.assertAllClose(initial_auc, self.evaluate(auc_obj.result()), 1e-3)
def test_unweighted_all_correct(self):
self.setup()
auc_obj = metrics.AUC()
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_true)
self.assertEqual(self.evaluate(result), 1)
def test_unweighted(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.75 * 1 + 0.25 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.571)/2, (0.571 + 0)/2] = [0.7855, 0.2855]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.7855 * 1 + 0.2855 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
summation_method=metrics_utils.AUCSummationMethod.MAJORING)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [max(1, 0.571), max(0.571, 0)] = [1, 0.571]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (1 * 1 + 0.571 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
summation_method=metrics_utils.AUCSummationMethod.MINORING)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [min(1, 0.571), min(0.571, 0)] = [0.571, 0]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.571 * 1 + 0 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve=metrics_utils.AUCCurve.PR,
summation_method=metrics_utils.AUCSummationMethod.MAJORING)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [max(0.7, 1), max(1, 0)] = [1, 1]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (1 * 0.429 + 1 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve=metrics_utils.AUCCurve.PR,
summation_method=metrics_utils.AUCSummationMethod.MINORING)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [min(0.7, 1), min(1, 0)] = [0.7, 0]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (0.7 * 0.429 + 0 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_interpolation(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve=metrics_utils.AUCCurve.PR,
summation_method=metrics_utils.AUCSummationMethod.INTERPOLATION)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# auc = (slope / Total Pos) * [dTP - intercept * log(Pb/Pa)]
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# P = tp + fp = [10, 4, 0]
# dTP = [7-4, 4-0] = [3, 4]
# dP = [10-4, 4-0] = [6, 4]
# slope = dTP/dP = [0.5, 1]
# intercept = (TPa - slope * Pa) = [(4 - 0.5*4), (0 - 1*0)] = [2, 0]
# (Pb/Pa) = (Pb/Pa) if Pb > 0 AND Pa > 0 else 1 = [10/4, 4/0] = [2.5, 1]
# auc * TotalPos = [(0.5 * (3 + 2 * log(2.5))), (1 * (4 + 0))]
# = [2.416, 4]
# auc = [2.416, 4]/(tp[1:]+fn[1:])
expected_result = (2.416/7 + 4/7)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=-1)
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=1)
if __name__ == '__main__':
test.main()
|
|
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.contrib.admin.utils import (
NotRelationField, flatten, get_fields_from_path,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.forms.models import (
BaseModelForm, BaseModelFormSet, _get_foreign_key,
)
from django.template.engine import Engine
def check_admin_app(app_configs, **kwargs):
from django.contrib.admin.sites import all_sites
errors = []
for site in all_sites:
errors.extend(site.check(app_configs))
return errors
def check_dependencies(**kwargs):
"""
Check that the admin's dependencies are correctly installed.
"""
errors = []
# contrib.contenttypes must be installed.
if not apps.is_installed('django.contrib.contenttypes'):
missing_app = checks.Error(
"'django.contrib.contenttypes' must be in INSTALLED_APPS in order "
"to use the admin application.",
id="admin.E401",
)
errors.append(missing_app)
# The auth context processor must be installed if using the default
# authentication backend.
try:
default_template_engine = Engine.get_default()
except Exception:
# Skip this non-critical check:
# 1. if the user has a non-trivial TEMPLATES setting and Django
# can't find a default template engine
# 2. if anything goes wrong while loading template engines, in
# order to avoid raising an exception from a confusing location
# Catching ImproperlyConfigured suffices for 1. but 2. requires
# catching all exceptions.
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors and
'django.contrib.auth.backends.ModelBackend' in settings.AUTHENTICATION_BACKENDS):
missing_template = checks.Error(
"'django.contrib.auth.context_processors.auth' must be in "
"TEMPLATES in order to use the admin application.",
id="admin.E402"
)
errors.append(missing_template)
return errors
class BaseModelAdminChecks:
def check(self, admin_obj, **kwargs):
errors = []
errors.extend(self._check_raw_id_fields(admin_obj))
errors.extend(self._check_fields(admin_obj))
errors.extend(self._check_fieldsets(admin_obj))
errors.extend(self._check_exclude(admin_obj))
errors.extend(self._check_form(admin_obj))
errors.extend(self._check_filter_vertical(admin_obj))
errors.extend(self._check_filter_horizontal(admin_obj))
errors.extend(self._check_radio_fields(admin_obj))
errors.extend(self._check_prepopulated_fields(admin_obj))
errors.extend(self._check_view_on_site_url(admin_obj))
errors.extend(self._check_ordering(admin_obj))
errors.extend(self._check_readonly_fields(admin_obj))
return errors
def _check_raw_id_fields(self, obj):
""" Check that `raw_id_fields` only contains field names that are listed
on the model. """
if not isinstance(obj.raw_id_fields, (list, tuple)):
return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001')
else:
return list(chain.from_iterable(
self._check_raw_id_fields_item(obj, obj.model, field_name, 'raw_id_fields[%d]' % index)
for index, field_name in enumerate(obj.raw_id_fields)
))
def _check_raw_id_fields_item(self, obj, model, field_name, label):
""" Check an item of `raw_id_fields`, i.e. check that field named
`field_name` exists in model `model` and is a ForeignKey or a
ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E002')
else:
if not field.many_to_many and not isinstance(field, models.ForeignKey):
return must_be('a foreign key or a many-to-many field',
option=label, obj=obj, id='admin.E003')
else:
return []
def _check_fields(self, obj):
""" Check that `fields` only refer to existing fields, doesn't contain
duplicates. Check if at most one of `fields` and `fieldsets` is defined.
"""
if obj.fields is None:
return []
elif not isinstance(obj.fields, (list, tuple)):
return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004')
elif obj.fieldsets:
return [
checks.Error(
"Both 'fieldsets' and 'fields' are specified.",
obj=obj.__class__,
id='admin.E005',
)
]
fields = flatten(obj.fields)
if len(fields) != len(set(fields)):
return [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
obj=obj.__class__,
id='admin.E006',
)
]
return list(chain.from_iterable(
self._check_field_spec(obj, obj.model, field_name, 'fields')
for field_name in obj.fields
))
def _check_fieldsets(self, obj):
""" Check that fieldsets is properly formatted and doesn't contain
duplicates. """
if obj.fieldsets is None:
return []
elif not isinstance(obj.fieldsets, (list, tuple)):
return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007')
else:
return list(chain.from_iterable(
self._check_fieldsets_item(obj, obj.model, fieldset, 'fieldsets[%d]' % index)
for index, fieldset in enumerate(obj.fieldsets)
))
def _check_fieldsets_item(self, obj, model, fieldset, label):
""" Check an item of `fieldsets`, i.e. check that this is a pair of a
set name and a dictionary containing "fields" key. """
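# Added note: a well-formed item looks like
# ('Personal info', {'fields': ('first_name', 'last_name')}), where the first
# element may also be None.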
if not isinstance(fieldset, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E008')
elif len(fieldset) != 2:
return must_be('of length 2', option=label, obj=obj, id='admin.E009')
elif not isinstance(fieldset[1], dict):
return must_be('a dictionary', option='%s[1]' % label, obj=obj, id='admin.E010')
elif 'fields' not in fieldset[1]:
return [
checks.Error(
"The value of '%s[1]' must contain the key 'fields'." % label,
obj=obj.__class__,
id='admin.E011',
)
]
elif not isinstance(fieldset[1]['fields'], (list, tuple)):
return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=obj, id='admin.E008')
fields = flatten(fieldset[1]['fields'])
if len(fields) != len(set(fields)):
return [
checks.Error(
"There are duplicate field(s) in '%s[1]'." % label,
obj=obj.__class__,
id='admin.E012',
)
]
return list(chain.from_iterable(
self._check_field_spec(obj, model, fieldset_fields, '%s[1]["fields"]' % label)
for fieldset_fields in fieldset[1]['fields']
))
def _check_field_spec(self, obj, model, fields, label):
""" `fields` should be an item of `fields` or an item of
fieldset[1]['fields'] for any `fieldset` in `fieldsets`. It should be a
field name or a tuple of field names. """
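# Added note: for fields = ['title', ('author', 'pub_date')] this method is
# called once with 'title' and once with the ('author', 'pub_date') tuple,
# which is expanded item by item below.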
if isinstance(fields, tuple):
return list(chain.from_iterable(
self._check_field_spec_item(obj, model, field_name, "%s[%d]" % (label, index))
for index, field_name in enumerate(fields)
))
else:
return self._check_field_spec_item(obj, model, fields, label)
def _check_field_spec_item(self, obj, model, field_name, label):
if field_name in obj.readonly_fields:
# Stuff can be put in fields that isn't actually a model field if
# it's in readonly_fields; readonly_fields will handle the
# validation of such things.
return []
else:
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# If we can't find a field on the model that matches, it could
# be an extra field on the form.
return []
else:
if (isinstance(field, models.ManyToManyField) and
not field.remote_field.through._meta.auto_created):
return [
checks.Error(
"The value of '%s' cannot include the ManyToManyField '%s', "
"because that field manually specifies a relationship model."
% (label, field_name),
obj=obj.__class__,
id='admin.E013',
)
]
else:
return []
def _check_exclude(self, obj):
""" Check that exclude is a sequence without duplicates. """
if obj.exclude is None: # default value is None
return []
elif not isinstance(obj.exclude, (list, tuple)):
return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014')
elif len(obj.exclude) > len(set(obj.exclude)):
return [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
obj=obj.__class__,
id='admin.E015',
)
]
else:
return []
def _check_form(self, obj):
""" Check that form subclasses BaseModelForm. """
if hasattr(obj, 'form') and not issubclass(obj.form, BaseModelForm):
return must_inherit_from(parent='BaseModelForm', option='form',
obj=obj, id='admin.E016')
else:
return []
def _check_filter_vertical(self, obj):
""" Check that filter_vertical is a sequence of field names. """
if not hasattr(obj, 'filter_vertical'):
return []
elif not isinstance(obj.filter_vertical, (list, tuple)):
return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017')
else:
return list(chain.from_iterable(
self._check_filter_item(obj, obj.model, field_name, "filter_vertical[%d]" % index)
for index, field_name in enumerate(obj.filter_vertical)
))
def _check_filter_horizontal(self, obj):
""" Check that filter_horizontal is a sequence of field names. """
if not hasattr(obj, 'filter_horizontal'):
return []
elif not isinstance(obj.filter_horizontal, (list, tuple)):
return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018')
else:
return list(chain.from_iterable(
self._check_filter_item(obj, obj.model, field_name, "filter_horizontal[%d]" % index)
for index, field_name in enumerate(obj.filter_horizontal)
))
def _check_filter_item(self, obj, model, field_name, label):
""" Check one item of `filter_vertical` or `filter_horizontal`, i.e.
check that given field exists and is a ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E019')
else:
if not field.many_to_many:
return must_be('a many-to-many field', option=label, obj=obj, id='admin.E020')
else:
return []
def _check_radio_fields(self, obj):
""" Check that `radio_fields` is a dictionary. """
if not hasattr(obj, 'radio_fields'):
return []
elif not isinstance(obj.radio_fields, dict):
return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021')
else:
return list(chain.from_iterable(
self._check_radio_fields_key(obj, obj.model, field_name, 'radio_fields') +
self._check_radio_fields_value(obj, val, 'radio_fields["%s"]' % field_name)
for field_name, val in obj.radio_fields.items()
))
def _check_radio_fields_key(self, obj, model, field_name, label):
""" Check that a key of `radio_fields` dictionary is name of existing
field and that the field is a ForeignKey or has `choices` defined. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E022')
else:
if not (isinstance(field, models.ForeignKey) or field.choices):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an "
"instance of ForeignKey, and does not have a 'choices' definition." % (
label, field_name
),
obj=obj.__class__,
id='admin.E023',
)
]
else:
return []
def _check_radio_fields_value(self, obj, val, label):
""" Check type of a value of `radio_fields` dictionary. """
from django.contrib.admin.options import HORIZONTAL, VERTICAL
if val not in (HORIZONTAL, VERTICAL):
return [
checks.Error(
"The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label,
obj=obj.__class__,
id='admin.E024',
)
]
else:
return []
def _check_view_on_site_url(self, obj):
if hasattr(obj, 'view_on_site'):
if not callable(obj.view_on_site) and not isinstance(obj.view_on_site, bool):
return [
checks.Error(
"The value of 'view_on_site' must be a callable or a boolean value.",
obj=obj.__class__,
id='admin.E025',
)
]
else:
return []
else:
return []
def _check_prepopulated_fields(self, obj):
""" Check that `prepopulated_fields` is a dictionary containing allowed
field types. """
if not hasattr(obj, 'prepopulated_fields'):
return []
elif not isinstance(obj.prepopulated_fields, dict):
return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026')
else:
return list(chain.from_iterable(
self._check_prepopulated_fields_key(obj, obj.model, field_name, 'prepopulated_fields') +
self._check_prepopulated_fields_value(obj, obj.model, val, 'prepopulated_fields["%s"]' % field_name)
for field_name, val in obj.prepopulated_fields.items()
))
def _check_prepopulated_fields_key(self, obj, model, field_name, label):
""" Check a key of `prepopulated_fields` dictionary, i.e. check that it
is a name of existing field and the field is one of the allowed types.
"""
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E027')
else:
if isinstance(field, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)):
return [
checks.Error(
"The value of '%s' refers to '%s', which must not be a DateTimeField, "
"a ForeignKey, a OneToOneField, or a ManyToManyField." % (label, field_name),
obj=obj.__class__,
id='admin.E028',
)
]
else:
return []
def _check_prepopulated_fields_value(self, obj, model, val, label):
""" Check a value of `prepopulated_fields` dictionary, i.e. it's an
iterable of existing fields. """
if not isinstance(val, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E029')
else:
return list(chain.from_iterable(
self._check_prepopulated_fields_value_item(obj, model, subfield_name, "%s[%r]" % (label, index))
for index, subfield_name in enumerate(val)
))
def _check_prepopulated_fields_value_item(self, obj, model, field_name, label):
""" For `prepopulated_fields` equal to {"slug": ("title",)},
`field_name` is "title". """
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E030')
else:
return []
def _check_ordering(self, obj):
""" Check that ordering refers to existing fields or is random. """
# ordering = None
if obj.ordering is None: # The default value is None
return []
elif not isinstance(obj.ordering, (list, tuple)):
return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031')
else:
return list(chain.from_iterable(
self._check_ordering_item(obj, obj.model, field_name, 'ordering[%d]' % index)
for index, field_name in enumerate(obj.ordering)
))
def _check_ordering_item(self, obj, model, field_name, label):
""" Check that `ordering` refers to existing fields. """
if field_name == '?' and len(obj.ordering) != 1:
return [
checks.Error(
"The value of 'ordering' has the random ordering marker '?', "
"but contains other fields as well.",
hint='Either remove the "?", or remove the other fields.',
obj=obj.__class__,
id='admin.E032',
)
]
elif field_name == '?':
return []
elif LOOKUP_SEP in field_name:
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
return []
else:
if field_name.startswith('-'):
field_name = field_name[1:]
if field_name == 'pk':
return []
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E033')
else:
return []
def _check_readonly_fields(self, obj):
""" Check that readonly_fields refers to proper attribute or field. """
if obj.readonly_fields == ():
return []
elif not isinstance(obj.readonly_fields, (list, tuple)):
return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034')
else:
return list(chain.from_iterable(
self._check_readonly_fields_item(obj, obj.model, field_name, "readonly_fields[%d]" % index)
for index, field_name in enumerate(obj.readonly_fields)
))
def _check_readonly_fields_item(self, obj, model, field_name, label):
if callable(field_name):
return []
elif hasattr(obj, field_name):
return []
elif hasattr(model, field_name):
return []
else:
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return [
checks.Error(
"The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." % (
label, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
obj=obj.__class__,
id='admin.E035',
)
]
else:
return []
class ModelAdminChecks(BaseModelAdminChecks):
def check(self, admin_obj, **kwargs):
errors = super().check(admin_obj)
errors.extend(self._check_save_as(admin_obj))
errors.extend(self._check_save_on_top(admin_obj))
errors.extend(self._check_inlines(admin_obj))
errors.extend(self._check_list_display(admin_obj))
errors.extend(self._check_list_display_links(admin_obj))
errors.extend(self._check_list_filter(admin_obj))
errors.extend(self._check_list_select_related(admin_obj))
errors.extend(self._check_list_per_page(admin_obj))
errors.extend(self._check_list_max_show_all(admin_obj))
errors.extend(self._check_list_editable(admin_obj))
errors.extend(self._check_search_fields(admin_obj))
errors.extend(self._check_date_hierarchy(admin_obj))
return errors
def _check_save_as(self, obj):
""" Check save_as is a boolean. """
if not isinstance(obj.save_as, bool):
return must_be('a boolean', option='save_as',
obj=obj, id='admin.E101')
else:
return []
def _check_save_on_top(self, obj):
""" Check save_on_top is a boolean. """
if not isinstance(obj.save_on_top, bool):
return must_be('a boolean', option='save_on_top',
obj=obj, id='admin.E102')
else:
return []
def _check_inlines(self, obj):
""" Check all inline model admin classes. """
if not isinstance(obj.inlines, (list, tuple)):
return must_be('a list or tuple', option='inlines', obj=obj, id='admin.E103')
else:
return list(chain.from_iterable(
self._check_inlines_item(obj, obj.model, item, "inlines[%d]" % index)
for index, item in enumerate(obj.inlines)
))
def _check_inlines_item(self, obj, model, inline, label):
""" Check one inline model admin. """
inline_label = '.'.join([inline.__module__, inline.__name__])
from django.contrib.admin.options import InlineModelAdmin
if not issubclass(inline, InlineModelAdmin):
return [
checks.Error(
"'%s' must inherit from 'InlineModelAdmin'." % inline_label,
obj=obj.__class__,
id='admin.E104',
)
]
elif not inline.model:
return [
checks.Error(
"'%s' must have a 'model' attribute." % inline_label,
obj=obj.__class__,
id='admin.E105',
)
]
elif not issubclass(inline.model, models.Model):
return must_be('a Model', option='%s.model' % inline_label, obj=obj, id='admin.E106')
else:
return inline(model, obj.admin_site).check()
def _check_list_display(self, obj):
""" Check that list_display only contains fields or usable attributes.
"""
if not isinstance(obj.list_display, (list, tuple)):
return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107')
else:
return list(chain.from_iterable(
self._check_list_display_item(obj, obj.model, item, "list_display[%d]" % index)
for index, item in enumerate(obj.list_display)
))
def _check_list_display_item(self, obj, model, item, label):
if callable(item):
return []
elif hasattr(obj, item):
return []
elif hasattr(model, item):
# getattr(model, item) could be an X_RelatedObjectsDescriptor
try:
field = model._meta.get_field(item)
except FieldDoesNotExist:
try:
field = getattr(model, item)
except AttributeError:
field = None
if field is None:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not a "
"callable, an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
obj=obj.__class__,
id='admin.E108',
)
]
elif isinstance(field, models.ManyToManyField):
return [
checks.Error(
"The value of '%s' must not be a ManyToManyField." % label,
obj=obj.__class__,
id='admin.E109',
)
]
else:
return []
else:
try:
model._meta.get_field(item)
except FieldDoesNotExist:
return [
# This is a deliberate repeat of E108; there's more than one path
# required to test this condition.
checks.Error(
"The value of '%s' refers to '%s', which is not a callable, "
"an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
obj=obj.__class__,
id='admin.E108',
)
]
else:
return []
def _check_list_display_links(self, obj):
""" Check that list_display_links is a unique subset of list_display.
"""
from django.contrib.admin.options import ModelAdmin
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110')
# Check only if ModelAdmin.get_list_display() isn't overridden.
elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:
return list(chain.from_iterable(
self._check_list_display_links_item(obj, field_name, "list_display_links[%d]" % index)
for index, field_name in enumerate(obj.list_display_links)
))
return []
def _check_list_display_links_item(self, obj, field_name, label):
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not defined in 'list_display'." % (
label, field_name
),
obj=obj.__class__,
id='admin.E111',
)
]
else:
return []
def _check_list_filter(self, obj):
if not isinstance(obj.list_filter, (list, tuple)):
return must_be('a list or tuple', option='list_filter', obj=obj, id='admin.E112')
else:
return list(chain.from_iterable(
self._check_list_filter_item(obj, obj.model, item, "list_filter[%d]" % index)
for index, item in enumerate(obj.list_filter)
))
def _check_list_filter_item(self, obj, model, item, label):
"""
Check one item of `list_filter`, i.e. check if it is one of three options:
1. 'field' -- a basic field filter, possibly w/ relationships (e.g.
'field__rel')
2. ('field', SomeFieldListFilter) - a field-based list filter class
3. SomeListFilter - a non-field list filter class
"""
from django.contrib.admin import ListFilter, FieldListFilter
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not issubclass(item, ListFilter):
return must_inherit_from(parent='ListFilter', option=label,
obj=obj, id='admin.E113')
# ... but not a FieldListFilter.
elif issubclass(item, FieldListFilter):
return [
checks.Error(
"The value of '%s' must not inherit from 'FieldListFilter'." % label,
obj=obj.__class__,
id='admin.E114',
)
]
else:
return []
elif isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not issubclass(list_filter_class, FieldListFilter):
return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label, obj=obj, id='admin.E115')
else:
return []
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(model, field)
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of '%s' refers to '%s', which does not refer to a Field." % (label, field),
obj=obj.__class__,
id='admin.E116',
)
]
else:
return []
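    # Illustrative sketch (not part of Django): the three accepted forms of a
    # `list_filter` item that the check above validates. `BookAdmin`,
    # `AuthorListFilter`, and the field names are hypothetical.
    #
    #   from django.contrib import admin
    #
    #   class BookAdmin(admin.ModelAdmin):
    #       list_filter = (
    #           'author__name',                             # option 1: field path
    #           ('published', admin.DateFieldListFilter),   # option 2: (field, FieldListFilter)
    #           AuthorListFilter,                           # option 3: ListFilter subclass
    #       )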
def _check_list_select_related(self, obj):
""" Check that list_select_related is a boolean, a list or a tuple. """
if not isinstance(obj.list_select_related, (bool, list, tuple)):
return must_be('a boolean, tuple or list', option='list_select_related', obj=obj, id='admin.E117')
else:
return []
def _check_list_per_page(self, obj):
""" Check that list_per_page is an integer. """
if not isinstance(obj.list_per_page, int):
return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118')
else:
return []
def _check_list_max_show_all(self, obj):
""" Check that list_max_show_all is an integer. """
if not isinstance(obj.list_max_show_all, int):
return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119')
else:
return []
def _check_list_editable(self, obj):
""" Check that list_editable is a sequence of editable fields from
list_display without first element. """
if not isinstance(obj.list_editable, (list, tuple)):
return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120')
else:
return list(chain.from_iterable(
self._check_list_editable_item(obj, obj.model, item, "list_editable[%d]" % index)
for index, item in enumerate(obj.list_editable)
))
def _check_list_editable_item(self, obj, model, field_name, label):
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E121')
else:
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not "
"contained in 'list_display'." % (label, field_name),
obj=obj.__class__,
id='admin.E122',
)
]
elif obj.list_display_links and field_name in obj.list_display_links:
return [
checks.Error(
"The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name,
obj=obj.__class__,
id='admin.E123',
)
]
# If list_display[0] is in list_editable, check that
# list_display_links is set. See #22792 and #26229 for use cases.
elif (obj.list_display[0] == field_name and not obj.list_display_links and
obj.list_display_links is not None):
return [
checks.Error(
"The value of '%s' refers to the first field in 'list_display' ('%s'), "
"which cannot be used unless 'list_display_links' is set." % (
label, obj.list_display[0]
),
obj=obj.__class__,
id='admin.E124',
)
]
elif not field.editable:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not editable through the admin." % (
label, field_name
),
obj=obj.__class__,
id='admin.E125',
)
]
else:
return []
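    # Illustrative sketch (not part of Django, hypothetical model and admin): how
    # the checks above interact for `list_editable`.
    #
    #   class ArticleAdmin(admin.ModelAdmin):
    #       list_display = ('title', 'status')
    #       list_editable = ('title',)      # admin.E124: first list_display field,
    #                                       # but list_display_links is not set
    #
    #   class ArticleAdmin(admin.ModelAdmin):
    #       list_display = ('title', 'status')
    #       list_display_links = ('title',)
    #       list_editable = ('status',)     # passes: editable, in list_display,
    #                                       # and not a display link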
def _check_search_fields(self, obj):
""" Check search_fields is a sequence. """
if not isinstance(obj.search_fields, (list, tuple)):
return must_be('a list or tuple', option='search_fields', obj=obj, id='admin.E126')
else:
return []
def _check_date_hierarchy(self, obj):
""" Check that date_hierarchy refers to DateField or DateTimeField. """
if obj.date_hierarchy is None:
return []
else:
try:
field = get_fields_from_path(obj.model, obj.date_hierarchy)[-1]
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of 'date_hierarchy' refers to '%s', which "
"does not refer to a Field." % obj.date_hierarchy,
obj=obj.__class__,
id='admin.E127',
)
]
else:
if not isinstance(field, (models.DateField, models.DateTimeField)):
return must_be('a DateField or DateTimeField', option='date_hierarchy', obj=obj, id='admin.E128')
else:
return []
class InlineModelAdminChecks(BaseModelAdminChecks):
def check(self, inline_obj, **kwargs):
errors = super().check(inline_obj)
parent_model = inline_obj.parent_model
errors.extend(self._check_relation(inline_obj, parent_model))
errors.extend(self._check_exclude_of_parent_model(inline_obj, parent_model))
errors.extend(self._check_extra(inline_obj))
errors.extend(self._check_max_num(inline_obj))
errors.extend(self._check_min_num(inline_obj))
errors.extend(self._check_formset(inline_obj))
return errors
def _check_exclude_of_parent_model(self, obj, parent_model):
# Do not perform more specific checks if the base checks result in an
# error.
errors = super()._check_exclude(obj)
if errors:
return []
# Skip if `fk_name` is invalid.
if self._check_relation(obj, parent_model):
return []
if obj.exclude is None:
return []
fk = _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
if fk.name in obj.exclude:
return [
checks.Error(
"Cannot exclude the field '%s', because it is the foreign key "
"to the parent model '%s.%s'." % (
fk.name, parent_model._meta.app_label, parent_model._meta.object_name
),
obj=obj.__class__,
id='admin.E201',
)
]
else:
return []
def _check_relation(self, obj, parent_model):
try:
_get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
except ValueError as e:
return [checks.Error(e.args[0], obj=obj.__class__, id='admin.E202')]
else:
return []
def _check_extra(self, obj):
""" Check that extra is an integer. """
if not isinstance(obj.extra, int):
return must_be('an integer', option='extra', obj=obj, id='admin.E203')
else:
return []
def _check_max_num(self, obj):
""" Check that max_num is an integer. """
if obj.max_num is None:
return []
elif not isinstance(obj.max_num, int):
return must_be('an integer', option='max_num', obj=obj, id='admin.E204')
else:
return []
def _check_min_num(self, obj):
""" Check that min_num is an integer. """
if obj.min_num is None:
return []
elif not isinstance(obj.min_num, int):
return must_be('an integer', option='min_num', obj=obj, id='admin.E205')
else:
return []
def _check_formset(self, obj):
""" Check formset is a subclass of BaseModelFormSet. """
if not issubclass(obj.formset, BaseModelFormSet):
return must_inherit_from(parent='BaseModelFormSet', option='formset', obj=obj, id='admin.E206')
else:
return []
def must_be(type, option, obj, id):
return [
checks.Error(
"The value of '%s' must be %s." % (option, type),
obj=obj.__class__,
id=id,
),
]
def must_inherit_from(parent, option, obj, id):
return [
checks.Error(
"The value of '%s' must inherit from '%s'." % (option, parent),
obj=obj.__class__,
id=id,
),
]
def refer_to_missing_field(field, option, model, obj, id):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an attribute of '%s.%s'." % (
option, field, model._meta.app_label, model._meta.object_name
),
obj=obj.__class__,
id=id,
),
]
from seamless.core.transformation import SeamlessTransformationError
header = """
/*
The following C header has been auto-generated from the transformer schema.
It will be used to generate bindings, but it will not be automatically
added to the compiled transformer code.
If your transformer code is written in C/C++, you may include it yourself.
For C, you may need to include "stdint.h" and "stdbool.h".
If your transform() function is written in C++, don't forget to add 'extern "C" '
*/
"""
if "type" not in input_schema:
raise SeamlessTransformationError("Input schema (transformer.inp.schema) needs to be defined in JSON schema format, containing at least 'type'")
json_to_c = {
"integer": "int",
("integer", 1): "int8_t",
("integer", 2): "int16_t",
("integer", 4): "int32_t",
("integer", 8): "int64_t",
"number": "double",
("number", 4): "float",
("number", 8): "double",
"boolean": "bool",
"string": "char",
}
def gen_struct_name(name, postfix="Struct"):
def capitalize(subname):
return "".join([subsubname.capitalize() for subsubname in subname.split("_")])
if isinstance(name, str):
name = (name,)
return "".join([capitalize(subname) for subname in name]) + postfix
def gen_basic_type(name, schema, *, verify_integer_bytesize, item=False):
name2 = name
if isinstance(name, (tuple, list)):
name2 = ".".join(name)
warnings = []
has_form = "form" in schema
if has_form:
form = schema["form"]
err = "'{0}' form schema does not provide ".format(name2)
else:
err = "'{0}' has no form schema that provides ".format(name2)
if item:
if "type" in schema:
type = schema["type"]
else:
if not has_form or "type" not in form:
raise SeamlessTransformationError("Item schema {0} must contain 'type' in its schema or form schema".format(name2))
type = form["type"]
else:
type = schema["type"]
ctype = json_to_c[type]
result = ctype
if type in ("integer", "number"):
if not has_form or "bytesize" not in form:
if type != "integer" or verify_integer_bytesize:
warnings.append(err + "'bytesize', assuming default type ('%s')" % ctype)
result = ctype
else:
result = json_to_c[type, form["bytesize"]]
if type == "integer":
if not has_form or "unsigned" not in form:
warnings.append(err + "'unsigned', assuming False")
else:
if form["unsigned"]:
if result.endswith("_t"):
result = "u" + result
else:
result = "unsigned " + result
###for warning in warnings:
### print("WARNING: " + warning)
return result
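# Illustrative examples of the basic-type mapping above (not executed here):
#   gen_basic_type("x", {"type": "number", "form": {"bytesize": 4}},
#                  verify_integer_bytesize=False)   -> "float"
#   gen_basic_type("n", {"type": "integer", "form": {"bytesize": 8, "unsigned": True}},
#                  verify_integer_bytesize=True)    -> "uint64_t"
#   gen_basic_type("flag", {"type": "boolean"},
#                  verify_integer_bytesize=False)   -> "bool"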
def gen_array(name, schema, *, verify_shape, const, is_result=False):
name2 = name
if isinstance(name, (tuple, list)):
name2 = ".".join(name)
if "form" not in schema:
raise SeamlessTransformationError("'{0}' schema must have form schema".format(name2))
form = schema["form"]
array_struct_name = gen_struct_name(name)
array_struct_members = []
if verify_shape and "shape" not in form:
raise SeamlessTransformationError("'{0}' form schema must have 'shape'".format(name2))
if "ndim" not in form:
raise SeamlessTransformationError("'{0}' form schema must have 'ndim'".format(name2))
array_struct_members.append(("unsigned int", "shape[%d]" % form["ndim"]))
warnings = []
if not verify_shape:
if "contiguous" not in form or not form["contiguous"]:
if "contiguous" not in form or "strides" not in form:
warn = "'{0}' form schema does not contain 'contiguous'. \
Explicit stride values will be provided.".format(name2)
warnings.append(warn)
array_struct_members.append(("unsigned int", "strides[%d]" % form["ndim"]))
itemschema = schema["items"]
if isinstance(itemschema, list):
raise NotImplementedError(name2) #heterogeneous arrays (tuples)
    tname = name
    struct_code = ""
    if isinstance(name, str):
        tname = (name,)
    type = itemschema.get("type")  # the item type drives the array/object/basic branches below
    if type == "array":
raise NotImplementedError(name2) #nested array
elif type == "object":
req_storage = "pure-binary"
ctype, nested_struct_code = gen_struct(
tname+ ("item",), itemschema,
verify_pure_binary=True,
const=const
)
if const:
ctype = "const " + ctype
ctype += " *"
struct_code += nested_struct_code + "\n"
else:
req_storage = "binary"
ctype = gen_basic_type(
tname+ ("item",),
itemschema,
verify_integer_bytesize=True,
item=True
)
if "storage" not in schema or not schema["storage"].endswith(req_storage):
raise SeamlessTransformationError("'{0}' schema must have {1} storage defined".format(name2, req_storage))
ctype2 = ctype
if const and not ctype2.startswith("const "):
ctype2 = "const " + ctype
array_struct_members.insert(0, (ctype2, "*data"))
array_struct_code = gen_struct_code(array_struct_name, array_struct_members)
for warning in warnings:
print("WARNING: " + warning)
struct_code += array_struct_code
return array_struct_name, struct_code
def gen_struct_code(name, members):
struct_code = "typedef struct {0} {{\n".format(name)
for type, member_name in members:
type = type.strip()
if type[-1] != "*":
type += " "
struct_code += " {0}{1};\n".format(type, member_name)
struct_code += "}} {0};\n\n".format(name)
return struct_code
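# Illustrative example of the emitted C code (not executed here):
#   gen_struct_code("PointStruct", [("double", "x"), ("double", "y")])
# returns:
#   typedef struct PointStruct {
#     double x;
#     double y;
#   } PointStruct;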
def gen_struct(name, schema, *, verify_pure_binary, const):
name2 = name
if isinstance(name, (tuple, list)):
name2 = ".".join(name)
if verify_pure_binary is not None:
req_storage = "pure-binary" if verify_pure_binary else "binary"
if "storage" not in schema or not schema["storage"].endswith(req_storage):
raise SeamlessTransformationError("'{0}' schema must have {1} storage defined".format(name2, req_storage))
struct_name = gen_struct_name(name)
struct_members = []
tname = name
if isinstance(name, str):
tname = (name,)
struct_code = ""
for propname, propschema in schema["properties"].items():
type = propschema["type"]
pname = tname + (propname,)
if type == "array":
ctype, nested_struct_code = gen_array(
pname, propschema,
verify_shape=True,
const=const
)
if const:
ctype = "const " + ctype
ctype += " *"
struct_code += nested_struct_code
elif type == "object":
            ctype, nested_struct_code = gen_struct(
                pname, propschema,
                verify_pure_binary=True,
                const=const
            )
if const:
ctype = "const " + ctype
ctype += " *"
struct_code += nested_struct_code
else:
ctype = gen_basic_type(propname, propschema, verify_integer_bytesize=True)
struct_members.append((ctype, propname))
struct_code += gen_struct_code(struct_name, struct_members)
return struct_name, struct_code
###print("input schema:", input_schema)
###print("result schema:", result_schema)
input_args = []
input_jtype = input_schema["type"]
if input_jtype == "array":
raise NotImplementedError
elif input_jtype == "object":
input_props = input_schema["properties"]
else:
input_props = {input_name: input_schema}
for pin in inputpins:
if pin not in input_props:
raise SeamlessTransformationError("Input pin '%s' is not in input schema" % pin)
order = input_schema.get("order", [])
for propname in sorted(input_props.keys()):
if propname not in order:
order.append(propname)
for propnr, propname in enumerate(order):
propschema = input_props[propname]
if "type" not in propschema:
raise SeamlessTransformationError("Property '%s' needs to have its type defined" % propname)
prop_jtype = propschema["type"]
if prop_jtype == "object":
raise NotImplementedError #input structs
elif prop_jtype == "array":
prop_ctype, array_struct_header = gen_array(propname, propschema, verify_shape=False, const=True)
prop_ctype = "const " + prop_ctype + "*"
header += array_struct_header
else:
prop_ctype = gen_basic_type(propname, propschema, verify_integer_bytesize=False)
input_args.append(prop_ctype + " " + propname)
if "type" not in result_schema:
raise SeamlessTransformationError("Result schema (transformer.result.schema) needs to be defined in JSON schema format, containing at least 'type'")
return_jtype = result_schema["type"]
if return_jtype == "object":
return_ctype = "void"
output_ctype, struct_header = gen_struct(result_name, result_schema, verify_pure_binary=None, const=False)
header += struct_header
input_args.append(output_ctype + " *" + result_name)
elif return_jtype == "array":
return_ctype = "void"
output_ctype, struct_header = gen_array(
result_name, result_schema,
verify_shape=False, const=False, is_result=True
)
header += struct_header
input_args.append(output_ctype + " *" + result_name)
else:
output_ctype = gen_basic_type(result_name, result_schema, verify_integer_bytesize=False)
input_args.append(output_ctype + " *" + result_name)
input_args = ", ".join(input_args)
result = header
result += "int transform({});".format(input_args)
# Copyright 2015 PLUMgrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
import ctypes as ct
import fcntl
import json
import os
import re
import struct
import errno
import sys
basestring = (unicode if sys.version_info[0] < 3 else str)
from .libbcc import lib, bcc_symbol, bcc_symbol_option, bcc_stacktrace_build_id, _SYM_CB_TYPE
from .table import Table, PerfEventArray
from .perf import Perf
from .utils import get_online_cpus, printb, _assert_is_bytes, ArgString
from .version import __version__
from .disassembler import disassemble_prog, decode_map
_probe_limit = 1000
_num_open_probes = 0
# for tests
def _get_num_open_probes():
global _num_open_probes
return _num_open_probes
TRACEFS = "/sys/kernel/debug/tracing"
# Debug flags
# Debug output compiled LLVM IR.
DEBUG_LLVM_IR = 0x1
# Debug output loaded BPF bytecode and register state on branches.
DEBUG_BPF = 0x2
# Debug output pre-processor result.
DEBUG_PREPROCESSOR = 0x4
# Debug output ASM instructions embedded with source.
DEBUG_SOURCE = 0x8
# Debug output register state on all instructions in addition to DEBUG_BPF.
DEBUG_BPF_REGISTER_STATE = 0x10
# Debug BTF.
DEBUG_BTF = 0x20
class SymbolCache(object):
def __init__(self, pid):
self.cache = lib.bcc_symcache_new(
pid, ct.cast(None, ct.POINTER(bcc_symbol_option)))
def resolve(self, addr, demangle):
"""
Return a tuple of the symbol (function), its offset from the beginning
of the function, and the module in which it lies. For example:
("start_thread", 0x202, "/usr/lib/.../libpthread-2.24.so")
If the symbol cannot be found but we know which module it is in,
return the module name and the offset from the beginning of the
module. If we don't even know the module, return the absolute
address as the offset.
"""
sym = bcc_symbol()
if demangle:
res = lib.bcc_symcache_resolve(self.cache, addr, ct.byref(sym))
else:
res = lib.bcc_symcache_resolve_no_demangle(self.cache, addr,
ct.byref(sym))
if res < 0:
if sym.module and sym.offset:
return (None, sym.offset,
ct.cast(sym.module, ct.c_char_p).value)
return (None, addr, None)
if demangle:
name_res = sym.demangle_name
lib.bcc_symbol_free_demangle_name(ct.byref(sym))
else:
name_res = sym.name
return (name_res, sym.offset, ct.cast(sym.module, ct.c_char_p).value)
def resolve_name(self, module, name):
module = _assert_is_bytes(module)
name = _assert_is_bytes(name)
addr = ct.c_ulonglong()
if lib.bcc_symcache_resolve_name(self.cache, module, name,
ct.byref(addr)) < 0:
return -1
return addr.value
class PerfType:
# From perf_type_id in uapi/linux/perf_event.h
HARDWARE = 0
SOFTWARE = 1
class PerfHWConfig:
# From perf_hw_id in uapi/linux/perf_event.h
CPU_CYCLES = 0
INSTRUCTIONS = 1
CACHE_REFERENCES = 2
CACHE_MISSES = 3
BRANCH_INSTRUCTIONS = 4
BRANCH_MISSES = 5
BUS_CYCLES = 6
STALLED_CYCLES_FRONTEND = 7
STALLED_CYCLES_BACKEND = 8
REF_CPU_CYCLES = 9
class PerfSWConfig:
# From perf_sw_id in uapi/linux/perf_event.h
CPU_CLOCK = 0
TASK_CLOCK = 1
PAGE_FAULTS = 2
CONTEXT_SWITCHES = 3
CPU_MIGRATIONS = 4
PAGE_FAULTS_MIN = 5
PAGE_FAULTS_MAJ = 6
ALIGNMENT_FAULTS = 7
EMULATION_FAULTS = 8
DUMMY = 9
BPF_OUTPUT = 10
class BPF(object):
# From bpf_prog_type in uapi/linux/bpf.h
SOCKET_FILTER = 1
KPROBE = 2
SCHED_CLS = 3
SCHED_ACT = 4
TRACEPOINT = 5
XDP = 6
PERF_EVENT = 7
CGROUP_SKB = 8
CGROUP_SOCK = 9
LWT_IN = 10
LWT_OUT = 11
LWT_XMIT = 12
SOCK_OPS = 13
SK_SKB = 14
CGROUP_DEVICE = 15
SK_MSG = 16
RAW_TRACEPOINT = 17
CGROUP_SOCK_ADDR = 18
# from xdp_action uapi/linux/bpf.h
XDP_ABORTED = 0
XDP_DROP = 1
XDP_PASS = 2
XDP_TX = 3
XDP_REDIRECT = 4
_probe_repl = re.compile(b"[^a-zA-Z0-9_]")
_sym_caches = {}
_bsymcache = lib.bcc_buildsymcache_new()
_auto_includes = {
"linux/time.h": ["time"],
"linux/fs.h": ["fs", "file"],
"linux/blkdev.h": ["bio", "request"],
"linux/slab.h": ["alloc"],
"linux/netdevice.h": ["sk_buff", "net_device"]
}
_syscall_prefixes = [
b"sys_",
b"__x64_sys_",
b"__x32_compat_sys_",
b"__ia32_compat_sys_",
b"__arm64_sys_",
]
# BPF timestamps come from the monotonic clock. To be able to filter
# and compare them from Python, we need to invoke clock_gettime.
# Adapted from http://stackoverflow.com/a/1205762
CLOCK_MONOTONIC = 1 # see <linux/time.h>
class timespec(ct.Structure):
_fields_ = [('tv_sec', ct.c_long), ('tv_nsec', ct.c_long)]
_librt = ct.CDLL('librt.so.1', use_errno=True)
_clock_gettime = _librt.clock_gettime
_clock_gettime.argtypes = [ct.c_int, ct.POINTER(timespec)]
@classmethod
def monotonic_time(cls):
"""monotonic_time()
Returns the system monotonic time from clock_gettime, using the
CLOCK_MONOTONIC constant. The time returned is in nanoseconds.
"""
t = cls.timespec()
if cls._clock_gettime(cls.CLOCK_MONOTONIC, ct.byref(t)) != 0:
errno = ct.get_errno()
raise OSError(errno, os.strerror(errno))
return t.tv_sec * 1e9 + t.tv_nsec
@classmethod
def generate_auto_includes(cls, program_words):
"""
Generates #include statements automatically based on a set of
recognized types such as sk_buff and bio. The input is all the words
that appear in the BPF program, and the output is a (possibly empty)
string of #include statements, such as "#include <linux/fs.h>".
"""
headers = ""
for header, keywords in cls._auto_includes.items():
for keyword in keywords:
for word in program_words:
if keyword in word and header not in headers:
headers += "#include <%s>\n" % header
return headers
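    # Illustrative example (not executed here): for a program whose words include
    # "sk_buff" and "bio", generate_auto_includes() returns
    #   "#include <linux/blkdev.h>\n#include <linux/netdevice.h>\n"
    # with the order following the _auto_includes dictionary above.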
# defined for compatibility reasons, to be removed
Table = Table
class Function(object):
def __init__(self, bpf, name, fd):
self.bpf = bpf
self.name = name
self.fd = fd
@staticmethod
def _find_file(filename):
""" If filename is invalid, search in ./ of argv[0] """
if filename:
if not os.path.isfile(filename):
argv0 = ArgString(sys.argv[0])
t = b"/".join([os.path.abspath(os.path.dirname(argv0.__str__())), filename])
if os.path.isfile(t):
filename = t
else:
raise Exception("Could not find file %s" % filename)
return filename
@staticmethod
def find_exe(bin_path):
"""
find_exe(bin_path)
Traverses the PATH environment variable, looking for the first
directory that contains an executable file named bin_path, and
returns the full path to that file, or None if no such file
can be found. This is meant to replace invocations of the
"which" shell utility, which doesn't have portable semantics
for skipping aliases.
"""
# Source: http://stackoverflow.com/a/377028
def is_exe(fpath):
return os.path.isfile(fpath) and \
os.access(fpath, os.X_OK)
fpath, fname = os.path.split(bin_path)
if fpath:
if is_exe(bin_path):
return bin_path
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, bin_path)
if is_exe(exe_file):
return exe_file
return None
def __init__(self, src_file=b"", hdr_file=b"", text=None, debug=0,
cflags=[], usdt_contexts=[], allow_rlimit=True):
"""Create a new BPF module with the given source code.
Note:
All fields are marked as optional, but either `src_file` or `text`
must be supplied, and not both.
Args:
src_file (Optional[str]): Path to a source file for the module
hdr_file (Optional[str]): Path to a helper header file for the `src_file`
text (Optional[str]): Contents of a source file for the module
debug (Optional[int]): Flags used for debug prints, can be |'d together
See "Debug flags" for explanation
"""
src_file = _assert_is_bytes(src_file)
hdr_file = _assert_is_bytes(hdr_file)
text = _assert_is_bytes(text)
self.kprobe_fds = {}
self.uprobe_fds = {}
self.tracepoint_fds = {}
self.raw_tracepoint_fds = {}
self.perf_buffers = {}
self.open_perf_events = {}
self.tracefile = None
atexit.register(self.cleanup)
self.debug = debug
self.funcs = {}
self.tables = {}
self.module = None
cflags_array = (ct.c_char_p * len(cflags))()
for i, s in enumerate(cflags): cflags_array[i] = bytes(ArgString(s))
if text:
ctx_array = (ct.c_void_p * len(usdt_contexts))()
for i, usdt in enumerate(usdt_contexts):
ctx_array[i] = ct.c_void_p(usdt.get_context())
usdt_text = lib.bcc_usdt_genargs(ctx_array, len(usdt_contexts))
if usdt_text is None:
raise Exception("can't generate USDT probe arguments; " +
"possible cause is missing pid when a " +
"probe in a shared object has multiple " +
"locations")
text = usdt_text + text
if text:
self.module = lib.bpf_module_create_c_from_string(text,
self.debug, cflags_array, len(cflags_array), allow_rlimit)
if not self.module:
raise Exception("Failed to compile BPF text")
else:
src_file = BPF._find_file(src_file)
hdr_file = BPF._find_file(hdr_file)
if src_file.endswith(b".b"):
self.module = lib.bpf_module_create_b(src_file, hdr_file,
self.debug)
else:
self.module = lib.bpf_module_create_c(src_file, self.debug,
cflags_array, len(cflags_array), allow_rlimit)
if not self.module:
raise Exception("Failed to compile BPF module %s" % src_file)
for usdt_context in usdt_contexts:
usdt_context.attach_uprobes(self)
# If any "kprobe__" or "tracepoint__" or "raw_tracepoint__"
# prefixed functions were defined,
# they will be loaded and attached here.
self._trace_autoload()
def load_funcs(self, prog_type=KPROBE):
"""load_funcs(prog_type=KPROBE)
Load all functions in this BPF module with the given type.
Returns a list of the function handles."""
fns = []
for i in range(0, lib.bpf_num_functions(self.module)):
func_name = lib.bpf_function_name(self.module, i)
fns.append(self.load_func(func_name, prog_type))
return fns
def load_func(self, func_name, prog_type):
func_name = _assert_is_bytes(func_name)
if func_name in self.funcs:
return self.funcs[func_name]
if not lib.bpf_function_start(self.module, func_name):
raise Exception("Unknown program %s" % func_name)
log_level = 0
if (self.debug & DEBUG_BPF_REGISTER_STATE):
log_level = 2
elif (self.debug & DEBUG_BPF):
log_level = 1
fd = lib.bcc_func_load(self.module, prog_type, func_name,
lib.bpf_function_start(self.module, func_name),
lib.bpf_function_size(self.module, func_name),
lib.bpf_module_license(self.module),
lib.bpf_module_kern_version(self.module),
                log_level, None, 0)
if fd < 0:
atexit.register(self.donothing)
if ct.get_errno() == errno.EPERM:
raise Exception("Need super-user privileges to run")
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to load BPF program %s: %s" %
(func_name, errstr))
fn = BPF.Function(self, func_name, fd)
self.funcs[func_name] = fn
return fn
def dump_func(self, func_name):
"""
Return the eBPF bytecodes for the specified function as a string
"""
func_name = _assert_is_bytes(func_name)
if not lib.bpf_function_start(self.module, func_name):
raise Exception("Unknown program %s" % func_name)
        start = lib.bpf_function_start(self.module, func_name)
        size = lib.bpf_function_size(self.module, func_name)
return ct.string_at(start, size)
def disassemble_func(self, func_name):
bpfstr = self.dump_func(func_name)
return disassemble_prog(func_name, bpfstr)
def decode_table(self, table_name, sizeinfo=False):
table_obj = self[table_name]
table_type = lib.bpf_table_type_id(self.module, table_obj.map_id)
return decode_map(table_name, table_obj, table_type, sizeinfo=sizeinfo)
str2ctype = {
u"_Bool": ct.c_bool,
u"char": ct.c_char,
u"wchar_t": ct.c_wchar,
u"unsigned char": ct.c_ubyte,
u"short": ct.c_short,
u"unsigned short": ct.c_ushort,
u"int": ct.c_int,
u"unsigned int": ct.c_uint,
u"long": ct.c_long,
u"unsigned long": ct.c_ulong,
u"long long": ct.c_longlong,
u"unsigned long long": ct.c_ulonglong,
u"float": ct.c_float,
u"double": ct.c_double,
u"long double": ct.c_longdouble,
u"__int128": ct.c_int64 * 2,
u"unsigned __int128": ct.c_uint64 * 2,
}
@staticmethod
def _decode_table_type(desc):
if isinstance(desc, basestring):
return BPF.str2ctype[desc]
anon = []
fields = []
for t in desc[1]:
if len(t) == 2:
fields.append((t[0], BPF._decode_table_type(t[1])))
elif len(t) == 3:
if isinstance(t[2], list):
fields.append((t[0], BPF._decode_table_type(t[1]) * t[2][0]))
elif isinstance(t[2], int):
fields.append((t[0], BPF._decode_table_type(t[1]), t[2]))
elif isinstance(t[2], basestring) and (
t[2] == u"union" or t[2] == u"struct" or
t[2] == u"struct_packed"):
name = t[0]
if name == "":
name = "__anon%d" % len(anon)
anon.append(name)
fields.append((name, BPF._decode_table_type(t)))
else:
raise Exception("Failed to decode type %s" % str(t))
else:
raise Exception("Failed to decode type %s" % str(t))
base = ct.Structure
is_packed = False
if len(desc) > 2:
if desc[2] == u"union":
base = ct.Union
elif desc[2] == u"struct":
base = ct.Structure
elif desc[2] == u"struct_packed":
base = ct.Structure
is_packed = True
if is_packed:
cls = type(str(desc[0]), (base,), dict(_anonymous_=anon, _pack_=1,
_fields_=fields))
else:
cls = type(str(desc[0]), (base,), dict(_anonymous_=anon,
_fields_=fields))
return cls
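    # Illustrative example (hypothetical descriptor, not executed here): for a key
    # declared in BPF C as
    #   struct key_t { unsigned int pid; char comm[16]; };
    # the JSON descriptor looks roughly like
    #   ["key_t", [["pid", "unsigned int"], ["comm", "char", [16]]], "struct"]
    # and _decode_table_type() builds a ctypes.Structure subclass with
    #   _fields_ = [("pid", ct.c_uint), ("comm", ct.c_char * 16)]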
def get_table(self, name, keytype=None, leaftype=None, reducer=None):
name = _assert_is_bytes(name)
map_id = lib.bpf_table_id(self.module, name)
map_fd = lib.bpf_table_fd(self.module, name)
if map_fd < 0:
raise KeyError
if not keytype:
key_desc = lib.bpf_table_key_desc(self.module, name).decode("utf-8")
if not key_desc:
raise Exception("Failed to load BPF Table %s key desc" % name)
keytype = BPF._decode_table_type(json.loads(key_desc))
if not leaftype:
leaf_desc = lib.bpf_table_leaf_desc(self.module, name).decode("utf-8")
if not leaf_desc:
raise Exception("Failed to load BPF Table %s leaf desc" % name)
leaftype = BPF._decode_table_type(json.loads(leaf_desc))
return Table(self, map_id, map_fd, keytype, leaftype, name, reducer=reducer)
def __getitem__(self, key):
if key not in self.tables:
self.tables[key] = self.get_table(key)
return self.tables[key]
def __setitem__(self, key, leaf):
self.tables[key] = leaf
def __len__(self):
return len(self.tables)
def __delitem__(self, key):
del self.tables[key]
def __iter__(self):
return self.tables.__iter__()
@staticmethod
def attach_raw_socket(fn, dev):
dev = _assert_is_bytes(dev)
if not isinstance(fn, BPF.Function):
raise Exception("arg 1 must be of type BPF.Function")
sock = lib.bpf_open_raw_sock(dev)
if sock < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to open raw device %s: %s" % (dev, errstr))
res = lib.bpf_attach_socket(sock, fn.fd)
if res < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to attach BPF to device %s: %s"
% (dev, errstr))
fn.sock = sock
@staticmethod
def get_kprobe_functions(event_re):
with open("%s/../kprobes/blacklist" % TRACEFS, "rb") as blacklist_f:
blacklist = set([line.rstrip().split()[1] for line in blacklist_f])
fns = []
in_init_section = 0
in_irq_section = 0
with open("/proc/kallsyms", "rb") as avail_file:
for line in avail_file:
(t, fn) = line.rstrip().split()[1:3]
# Skip all functions defined between __init_begin and
# __init_end
if in_init_section == 0:
if fn == b'__init_begin':
in_init_section = 1
continue
elif in_init_section == 1:
if fn == b'__init_end':
in_init_section = 2
continue
# Skip all functions defined between __irqentry_text_start and
# __irqentry_text_end
if in_irq_section == 0:
if fn == b'__irqentry_text_start':
in_irq_section = 1
continue
elif in_irq_section == 1:
if fn == b'__irqentry_text_end':
in_irq_section = 2
continue
                # All functions defined as NOKPROBE_SYMBOL() start with the
                # prefix _kbl_addr_*. Blacklisting them by name also catches
                # symbols that are defined in kernel modules.
if fn.startswith(b'_kbl_addr_'):
continue
# Explicitly blacklist perf-related functions, they are all
# non-attachable.
elif fn.startswith(b'__perf') or fn.startswith(b'perf_'):
continue
# Exclude all gcc 8's extra .cold functions
                elif re.match(br'^.*\.cold\.\d+$', fn):
continue
if (t.lower() in [b't', b'w']) and re.match(event_re, fn) \
and fn not in blacklist:
fns.append(fn)
return set(fns) # Some functions may appear more than once
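    # Illustrative usage (not executed here): collect attachable kernel symbols
    # matching a regex, e.g.
    #   fns = BPF.get_kprobe_functions(b"^tcp_send.*")
    # The same regex can be passed directly as attach_kprobe(event_re=...).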
def _check_probe_quota(self, num_new_probes):
global _num_open_probes
if _num_open_probes + num_new_probes > _probe_limit:
raise Exception("Number of open probes would exceed global quota")
def _add_kprobe_fd(self, name, fd):
global _num_open_probes
self.kprobe_fds[name] = fd
_num_open_probes += 1
def _del_kprobe_fd(self, name):
global _num_open_probes
del self.kprobe_fds[name]
_num_open_probes -= 1
def _add_uprobe_fd(self, name, fd):
global _num_open_probes
self.uprobe_fds[name] = fd
_num_open_probes += 1
def _del_uprobe_fd(self, name):
global _num_open_probes
del self.uprobe_fds[name]
_num_open_probes -= 1
    # Find the current system's syscall prefix by testing on the BPF syscall.
    # If no valid value is found, return the first candidate, which will
    # probably lead to an error in later API calls.
def get_syscall_prefix(self):
for prefix in self._syscall_prefixes:
if self.ksymname(b"%sbpf" % prefix) != -1:
return prefix
return self._syscall_prefixes[0]
    # Given a syscall's name, return the full kernel function name with the
    # current system's syscall prefix. For example, given "clone" the helper
    # would return "sys_clone" or "__x64_sys_clone".
def get_syscall_fnname(self, name):
name = _assert_is_bytes(name)
return self.get_syscall_prefix() + name
    # Given a kernel function name that represents a syscall but already has a
    # prefix included, transform it to the current system's prefix. For example,
    # if "sys_clone" is provided, the helper may translate it to "__x64_sys_clone".
def fix_syscall_fnname(self, name):
name = _assert_is_bytes(name)
for prefix in self._syscall_prefixes:
if name.startswith(prefix):
return self.get_syscall_fnname(name[len(prefix):])
return name
def attach_kprobe(self, event=b"", event_off=0, fn_name=b"", event_re=b""):
event = _assert_is_bytes(event)
fn_name = _assert_is_bytes(fn_name)
event_re = _assert_is_bytes(event_re)
# allow the caller to glob multiple functions together
if event_re:
matches = BPF.get_kprobe_functions(event_re)
self._check_probe_quota(len(matches))
for line in matches:
try:
self.attach_kprobe(event=line, fn_name=fn_name)
except:
pass
return
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = b"p_" + event.replace(b"+", b"_").replace(b".", b"_")
fd = lib.bpf_attach_kprobe(fn.fd, 0, ev_name, event, event_off, 0)
if fd < 0:
raise Exception("Failed to attach BPF program %s to kprobe %s" %
(fn_name, event))
self._add_kprobe_fd(ev_name, fd)
return self
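    # Minimal usage sketch (hypothetical program text, not executed here):
    #   prog = b"""
    #   int trace_clone(struct pt_regs *ctx) { bpf_trace_printk("clone\\n"); return 0; }
    #   """
    #   b = BPF(text=prog)
    #   b.attach_kprobe(event=b.get_syscall_fnname(b"clone"), fn_name=b"trace_clone")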
def attach_kretprobe(self, event=b"", fn_name=b"", event_re=b"", maxactive=0):
event = _assert_is_bytes(event)
fn_name = _assert_is_bytes(fn_name)
event_re = _assert_is_bytes(event_re)
# allow the caller to glob multiple functions together
if event_re:
for line in BPF.get_kprobe_functions(event_re):
try:
self.attach_kretprobe(event=line, fn_name=fn_name,
maxactive=maxactive)
except:
pass
return
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = b"r_" + event.replace(b"+", b"_").replace(b".", b"_")
fd = lib.bpf_attach_kprobe(fn.fd, 1, ev_name, event, 0, maxactive)
if fd < 0:
raise Exception("Failed to attach BPF program %s to kretprobe %s" %
(fn_name, event))
self._add_kprobe_fd(ev_name, fd)
return self
def detach_kprobe_event(self, ev_name):
if ev_name not in self.kprobe_fds:
raise Exception("Kprobe %s is not attached" % ev_name)
res = lib.bpf_close_perf_event_fd(self.kprobe_fds[ev_name])
if res < 0:
raise Exception("Failed to close kprobe FD")
res = lib.bpf_detach_kprobe(ev_name)
if res < 0:
raise Exception("Failed to detach BPF from kprobe")
self._del_kprobe_fd(ev_name)
def detach_kprobe(self, event):
event = _assert_is_bytes(event)
ev_name = b"p_" + event.replace(b"+", b"_").replace(b".", b"_")
self.detach_kprobe_event(ev_name)
def detach_kretprobe(self, event):
event = _assert_is_bytes(event)
ev_name = b"r_" + event.replace(b"+", b"_").replace(b".", b"_")
self.detach_kprobe_event(ev_name)
@staticmethod
def attach_xdp(dev, fn, flags=0):
'''
This function attaches a BPF function to a device on the device
driver level (XDP)
'''
dev = _assert_is_bytes(dev)
if not isinstance(fn, BPF.Function):
raise Exception("arg 1 must be of type BPF.Function")
res = lib.bpf_attach_xdp(dev, fn.fd, flags)
if res < 0:
err_no = ct.get_errno()
if err_no == errno.EBADMSG:
raise Exception("Internal error while attaching BPF to device,"+
" try increasing the debug level!")
else:
errstr = os.strerror(err_no)
raise Exception("Failed to attach BPF to device %s: %s"
% (dev, errstr))
@staticmethod
def remove_xdp(dev, flags=0):
'''
This function removes any BPF function from a device on the
device driver level (XDP)
'''
dev = _assert_is_bytes(dev)
res = lib.bpf_attach_xdp(dev, -1, flags)
if res < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Failed to detach BPF from device %s: %s"
% (dev, errstr))
@classmethod
def _check_path_symbol(cls, module, symname, addr, pid):
module = _assert_is_bytes(module)
symname = _assert_is_bytes(symname)
sym = bcc_symbol()
c_pid = 0 if pid == -1 else pid
if lib.bcc_resolve_symname(
module, symname,
addr or 0x0, c_pid,
ct.cast(None, ct.POINTER(bcc_symbol_option)),
ct.byref(sym),
) < 0:
raise Exception("could not determine address of symbol %s" % symname)
module_path = ct.cast(sym.module, ct.c_char_p).value
lib.bcc_procutils_free(sym.module)
return module_path, sym.offset
@staticmethod
def find_library(libname):
libname = _assert_is_bytes(libname)
res = lib.bcc_procutils_which_so(libname, 0)
if not res:
return None
libpath = ct.cast(res, ct.c_char_p).value
lib.bcc_procutils_free(res)
return libpath
@staticmethod
def get_tracepoints(tp_re):
results = []
events_dir = os.path.join(TRACEFS, "events")
for category in os.listdir(events_dir):
cat_dir = os.path.join(events_dir, category)
if not os.path.isdir(cat_dir):
continue
for event in os.listdir(cat_dir):
evt_dir = os.path.join(cat_dir, event)
if os.path.isdir(evt_dir):
tp = ("%s:%s" % (category, event))
if re.match(tp_re, tp):
results.append(tp)
return results
@staticmethod
def tracepoint_exists(category, event):
evt_dir = os.path.join(TRACEFS, "events", category, event)
return os.path.isdir(evt_dir)
def attach_tracepoint(self, tp=b"", tp_re=b"", fn_name=b""):
"""attach_tracepoint(tp="", tp_re="", fn_name="")
Run the bpf function denoted by fn_name every time the kernel tracepoint
        specified by 'tp' is hit. The tracepoint specification is simply
the tracepoint category and the tracepoint name, separated by a colon.
For example: sched:sched_switch, syscalls:sys_enter_bind, etc.
Instead of a tracepoint name, a regular expression can be provided in
tp_re. The program will then attach to tracepoints that match the
provided regular expression.
To obtain a list of kernel tracepoints, use the tplist tool or cat the
file /sys/kernel/debug/tracing/available_events.
Examples:
BPF(text).attach_tracepoint(tp="sched:sched_switch", fn_name="on_switch")
BPF(text).attach_tracepoint(tp_re="sched:.*", fn_name="on_switch")
"""
tp = _assert_is_bytes(tp)
tp_re = _assert_is_bytes(tp_re)
fn_name = _assert_is_bytes(fn_name)
if tp_re:
for tp in BPF.get_tracepoints(tp_re):
self.attach_tracepoint(tp=tp, fn_name=fn_name)
return
fn = self.load_func(fn_name, BPF.TRACEPOINT)
(tp_category, tp_name) = tp.split(b':')
fd = lib.bpf_attach_tracepoint(fn.fd, tp_category, tp_name)
if fd < 0:
raise Exception("Failed to attach BPF program %s to tracepoint %s" %
(fn_name, tp))
self.tracepoint_fds[tp] = fd
return self
def attach_raw_tracepoint(self, tp=b"", fn_name=b""):
"""attach_raw_tracepoint(self, tp=b"", fn_name=b"")
Run the bpf function denoted by fn_name every time the kernel tracepoint
specified by 'tp' is hit. The bpf function should be loaded as a
RAW_TRACEPOINT type. The fn_name is the kernel tracepoint name,
e.g., sched_switch, sys_enter_bind, etc.
Examples:
BPF(text).attach_raw_tracepoint(tp="sched_switch", fn_name="on_switch")
"""
tp = _assert_is_bytes(tp)
if tp in self.raw_tracepoint_fds:
raise Exception("Raw tracepoint %s has been attached" % tp)
fn_name = _assert_is_bytes(fn_name)
fn = self.load_func(fn_name, BPF.RAW_TRACEPOINT)
fd = lib.bpf_attach_raw_tracepoint(fn.fd, tp)
if fd < 0:
raise Exception("Failed to attach BPF to raw tracepoint")
        self.raw_tracepoint_fds[tp] = fd
return self
def detach_raw_tracepoint(self, tp=b""):
"""detach_raw_tracepoint(tp="")
Stop running the bpf function that is attached to the kernel tracepoint
specified by 'tp'.
Example: bpf.detach_raw_tracepoint("sched_switch")
"""
tp = _assert_is_bytes(tp)
if tp not in self.raw_tracepoint_fds:
raise Exception("Raw tracepoint %s is not attached" % tp)
os.close(self.raw_tracepoint_fds[tp])
del self.raw_tracepoint_fds[tp]
@staticmethod
def support_raw_tracepoint():
# kernel symbol "bpf_find_raw_tracepoint" indicates raw_tracepint support
if BPF.ksymname("bpf_find_raw_tracepoint") != -1:
return True
return False
def detach_tracepoint(self, tp=b""):
"""detach_tracepoint(tp="")
Stop running a bpf function that is attached to the kernel tracepoint
specified by 'tp'.
Example: bpf.detach_tracepoint("sched:sched_switch")
"""
tp = _assert_is_bytes(tp)
if tp not in self.tracepoint_fds:
raise Exception("Tracepoint %s is not attached" % tp)
res = lib.bpf_close_perf_event_fd(self.tracepoint_fds[tp])
if res < 0:
raise Exception("Failed to detach BPF from tracepoint")
(tp_category, tp_name) = tp.split(b':')
res = lib.bpf_detach_tracepoint(tp_category, tp_name)
if res < 0:
raise Exception("Failed to detach BPF from tracepoint")
del self.tracepoint_fds[tp]
def _attach_perf_event(self, progfd, ev_type, ev_config,
sample_period, sample_freq, pid, cpu, group_fd):
res = lib.bpf_attach_perf_event(progfd, ev_type, ev_config,
sample_period, sample_freq, pid, cpu, group_fd)
if res < 0:
raise Exception("Failed to attach BPF to perf event")
return res
def attach_perf_event(self, ev_type=-1, ev_config=-1, fn_name=b"",
sample_period=0, sample_freq=0, pid=-1, cpu=-1, group_fd=-1):
fn_name = _assert_is_bytes(fn_name)
fn = self.load_func(fn_name, BPF.PERF_EVENT)
res = {}
if cpu >= 0:
res[cpu] = self._attach_perf_event(fn.fd, ev_type, ev_config,
sample_period, sample_freq, pid, cpu, group_fd)
else:
for i in get_online_cpus():
res[i] = self._attach_perf_event(fn.fd, ev_type, ev_config,
sample_period, sample_freq, pid, i, group_fd)
self.open_perf_events[(ev_type, ev_config)] = res
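    # Minimal usage sketch (hypothetical BPF object `b` and function name, not
    # executed here): attach a PERF_EVENT program to the hardware cycle counter
    # on every online CPU, sampling once per million cycles.
    #   b.attach_perf_event(ev_type=PerfType.HARDWARE,
    #                       ev_config=PerfHWConfig.CPU_CYCLES,
    #                       fn_name=b"on_cycles", sample_period=1000000)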
def detach_perf_event(self, ev_type=-1, ev_config=-1):
try:
fds = self.open_perf_events[(ev_type, ev_config)]
except KeyError:
raise Exception("Perf event type {} config {} not attached".format(
ev_type, ev_config))
res = 0
for fd in fds.values():
res = lib.bpf_close_perf_event_fd(fd) or res
if res != 0:
raise Exception("Failed to detach BPF from perf event")
del self.open_perf_events[(ev_type, ev_config)]
@staticmethod
def get_user_functions(name, sym_re):
return set([name for (name, _) in
BPF.get_user_functions_and_addresses(name, sym_re)])
@staticmethod
def get_user_addresses(name, sym_re):
"""
We are returning addresses here instead of symbol names because it
turns out that the same name may appear multiple times with different
addresses, and the same address may appear multiple times with the same
name. We can't attach a uprobe to the same address more than once, so
it makes sense to return the unique set of addresses that are mapped to
a symbol that matches the provided regular expression.
"""
return set([address for (_, address) in
BPF.get_user_functions_and_addresses(name, sym_re)])
@staticmethod
def get_user_functions_and_addresses(name, sym_re):
name = _assert_is_bytes(name)
sym_re = _assert_is_bytes(sym_re)
addresses = []
def sym_cb(sym_name, addr):
dname = sym_name
if re.match(sym_re, dname):
addresses.append((dname, addr))
return 0
res = lib.bcc_foreach_function_symbol(name, _SYM_CB_TYPE(sym_cb))
if res < 0:
raise Exception("Error %d enumerating symbols in %s" % (res, name))
return addresses
def _get_uprobe_evname(self, prefix, path, addr, pid):
if pid == -1:
return b"%s_%s_0x%x" % (prefix, self._probe_repl.sub(b"_", path), addr)
else:
            # if pid is valid, put pid in the name, so different pids
            # can have different event names
return b"%s_%s_0x%x_%d" % (prefix, self._probe_repl.sub(b"_", path), addr, pid)
def attach_uprobe(self, name=b"", sym=b"", sym_re=b"", addr=None,
fn_name=b"", pid=-1):
"""attach_uprobe(name="", sym="", sym_re="", addr=None, fn_name=""
pid=-1)
Run the bpf function denoted by fn_name every time the symbol sym in
the library or binary 'name' is encountered. The real address addr may
        be supplied in place of sym. The optional parameter pid can be used to
        attach to the library as loaded by a specific process.
Instead of a symbol name, a regular expression can be provided in
sym_re. The uprobe will then attach to symbols that match the provided
regular expression.
Libraries can be given in the name argument without the lib prefix, or
with the full path (/usr/lib/...). Binaries can be given only with the
full path (/bin/sh). If a PID is given, the uprobe will attach to the
version of the library used by the process.
Example: BPF(text).attach_uprobe("c", "malloc")
BPF(text).attach_uprobe("/usr/bin/python", "main")
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
sym_re = _assert_is_bytes(sym_re)
fn_name = _assert_is_bytes(fn_name)
if sym_re:
addresses = BPF.get_user_addresses(name, sym_re)
self._check_probe_quota(len(addresses))
for sym_addr in addresses:
self.attach_uprobe(name=name, addr=sym_addr,
fn_name=fn_name, pid=pid)
return
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = self._get_uprobe_evname(b"p", path, addr, pid)
fd = lib.bpf_attach_uprobe(fn.fd, 0, ev_name, path, addr, pid)
if fd < 0:
raise Exception("Failed to attach BPF to uprobe")
self._add_uprobe_fd(ev_name, fd)
return self
def attach_uretprobe(self, name=b"", sym=b"", sym_re=b"", addr=None,
fn_name=b"", pid=-1):
"""attach_uretprobe(name="", sym="", sym_re="", addr=None, fn_name=""
pid=-1)
Run the bpf function denoted by fn_name every time the symbol sym in
the library or binary 'name' finishes execution. See attach_uprobe for
meaning of additional parameters.
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
sym_re = _assert_is_bytes(sym_re)
fn_name = _assert_is_bytes(fn_name)
if sym_re:
for sym_addr in BPF.get_user_addresses(name, sym_re):
self.attach_uretprobe(name=name, addr=sym_addr,
fn_name=fn_name, pid=pid)
return
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
self._check_probe_quota(1)
fn = self.load_func(fn_name, BPF.KPROBE)
ev_name = self._get_uprobe_evname(b"r", path, addr, pid)
fd = lib.bpf_attach_uprobe(fn.fd, 1, ev_name, path, addr, pid)
if fd < 0:
raise Exception("Failed to attach BPF to uretprobe")
self._add_uprobe_fd(ev_name, fd)
return self
def detach_uprobe_event(self, ev_name):
if ev_name not in self.uprobe_fds:
raise Exception("Uprobe %s is not attached" % ev_name)
res = lib.bpf_close_perf_event_fd(self.uprobe_fds[ev_name])
if res < 0:
raise Exception("Failed to detach BPF from uprobe")
res = lib.bpf_detach_uprobe(ev_name)
if res < 0:
raise Exception("Failed to detach BPF from uprobe")
self._del_uprobe_fd(ev_name)
def detach_uprobe(self, name=b"", sym=b"", addr=None, pid=-1):
"""detach_uprobe(name="", sym="", addr=None, pid=-1)
Stop running a bpf function that is attached to symbol 'sym' in library
or binary 'name'.
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
ev_name = self._get_uprobe_evname(b"p", path, addr, pid)
self.detach_uprobe_event(ev_name)
def detach_uretprobe(self, name=b"", sym=b"", addr=None, pid=-1):
"""detach_uretprobe(name="", sym="", addr=None, pid=-1)
Stop running a bpf function that is attached to symbol 'sym' in library
or binary 'name'.
"""
name = _assert_is_bytes(name)
sym = _assert_is_bytes(sym)
(path, addr) = BPF._check_path_symbol(name, sym, addr, pid)
ev_name = self._get_uprobe_evname(b"r", path, addr, pid)
self.detach_uprobe_event(ev_name)
def _trace_autoload(self):
for i in range(0, lib.bpf_num_functions(self.module)):
func_name = lib.bpf_function_name(self.module, i)
if func_name.startswith(b"kprobe__"):
fn = self.load_func(func_name, BPF.KPROBE)
self.attach_kprobe(
event=self.fix_syscall_fnname(func_name[8:]),
fn_name=fn.name)
elif func_name.startswith(b"kretprobe__"):
fn = self.load_func(func_name, BPF.KPROBE)
self.attach_kretprobe(
event=self.fix_syscall_fnname(func_name[11:]),
fn_name=fn.name)
elif func_name.startswith(b"tracepoint__"):
fn = self.load_func(func_name, BPF.TRACEPOINT)
tp = fn.name[len(b"tracepoint__"):].replace(b"__", b":")
self.attach_tracepoint(tp=tp, fn_name=fn.name)
elif func_name.startswith(b"raw_tracepoint__"):
fn = self.load_func(func_name, BPF.RAW_TRACEPOINT)
tp = fn.name[len(b"raw_tracepoint__"):]
self.attach_raw_tracepoint(tp=tp, fn_name=fn.name)
def trace_open(self, nonblocking=False):
"""trace_open(nonblocking=False)
Open the trace_pipe if not already open
"""
if not self.tracefile:
self.tracefile = open("%s/trace_pipe" % TRACEFS, "rb")
if nonblocking:
fd = self.tracefile.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
return self.tracefile
def trace_fields(self, nonblocking=False):
"""trace_fields(nonblocking=False)
        Read from the kernel debug trace pipe and return a tuple of the
        fields (task, pid, cpu, flags, timestamp, msg), or a tuple of Nones
        if no line was read (nonblocking=True).
"""
while True:
line = self.trace_readline(nonblocking)
if not line and nonblocking: return (None,) * 6
# don't print messages related to lost events
if line.startswith(b"CPU:"): continue
task = line[:16].lstrip()
line = line[17:]
ts_end = line.find(b":")
pid, cpu, flags, ts = line[:ts_end].split()
cpu = cpu[1:-1]
# line[ts_end:] will have ": [sym_or_addr]: msgs"
            # For trace_pipe debug output, the addr typically
            # is invalid (e.g., 0x1). For kernel 4.12 or earlier, if the
            # address cannot be matched to a kernel symbol, nothing is
            # printed out. For kernel 4.13 and later, the invalid address
            # is printed out.
# Hence, both cases are handled here.
line = line[ts_end + 1:]
sym_end = line.find(b":")
msg = line[sym_end + 2:]
return (task, int(pid), int(cpu), flags, float(ts), msg)
def trace_readline(self, nonblocking=False):
"""trace_readline(nonblocking=False)
Read from the kernel debug trace pipe and return one line
If nonblocking is False, this will block until ctrl-C is pressed.
"""
trace = self.trace_open(nonblocking)
line = None
try:
line = trace.readline(1024).rstrip()
except IOError:
pass
return line
def trace_print(self, fmt=None):
"""trace_print(self, fmt=None)
Read from the kernel debug trace pipe and print on stdout.
If fmt is specified, apply as a format string to the output. See
trace_fields for the members of the tuple
example: trace_print(fmt="pid {1}, msg = {5}")
"""
while True:
if fmt:
fields = self.trace_fields(nonblocking=False)
if not fields: continue
line = fmt.format(*fields)
else:
line = self.trace_readline(nonblocking=False)
print(line)
sys.stdout.flush()
@staticmethod
def _sym_cache(pid):
"""_sym_cache(pid)
Returns a symbol cache for the specified PID.
The kernel symbol cache is accessed by providing any PID less than zero.
"""
if pid < 0 and pid != -1:
pid = -1
if not pid in BPF._sym_caches:
BPF._sym_caches[pid] = SymbolCache(pid)
return BPF._sym_caches[pid]
@staticmethod
def sym(addr, pid, show_module=False, show_offset=False, demangle=True):
"""sym(addr, pid, show_module=False, show_offset=False)
Translate a memory address into a function name for a pid, which is
returned. When show_module is True, the module name is also included.
When show_offset is True, the instruction offset as a hexadecimal
number is also included in the string.
A pid of less than zero will access the kernel symbol cache.
Example output when both show_module and show_offset are True:
"start_thread+0x202 [libpthread-2.24.so]"
Example output when both show_module and show_offset are False:
"start_thread"
"""
        # addr may be of type bpf_stack_build_id,
        # in which case the build-id symbol resolver is used.
typeofaddr = str(type(addr))
if typeofaddr.find('bpf_stack_build_id') != -1:
sym = bcc_symbol()
b = bcc_stacktrace_build_id()
b.status = addr.status
b.build_id = addr.build_id
            b.u.offset = addr.offset
res = lib.bcc_buildsymcache_resolve(BPF._bsymcache,
ct.byref(b),
ct.byref(sym))
if res < 0:
if sym.module and sym.offset:
name,offset,module = (None, sym.offset,
ct.cast(sym.module, ct.c_char_p).value)
else:
name, offset, module = (None, addr, None)
else:
name, offset, module = (sym.name, sym.offset,
ct.cast(sym.module, ct.c_char_p).value)
else:
name, offset, module = BPF._sym_cache(pid).resolve(addr, demangle)
offset = b"+0x%x" % offset if show_offset and name is not None else b""
name = name or b"[unknown]"
name = name + offset
module = b" [%s]" % os.path.basename(module) \
if show_module and module is not None else b""
return name + module
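    # Example (illustrative): with show_module and show_offset enabled,
    # BPF.sym(addr, pid, True, True) might return something like
    # b"start_thread+0x202 [libpthread-2.24.so]", falling back to b"[unknown]"
    # when the address cannot be resolved.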
@staticmethod
def ksym(addr, show_module=False, show_offset=False):
"""ksym(addr)
Translate a kernel memory address into a kernel function name, which is
returned. When show_module is True, the module name ("kernel") is also
included. When show_offset is true, the instruction offset as a
hexadecimal number is also included in the string.
Example output when both show_module and show_offset are True:
"default_idle+0x0 [kernel]"
"""
return BPF.sym(addr, -1, show_module, show_offset, False)
@staticmethod
def ksymname(name):
"""ksymname(name)
Translate a kernel name into an address. This is the reverse of
ksym. Returns -1 when the function name is unknown."""
return BPF._sym_cache(-1).resolve_name(None, name)
def num_open_kprobes(self):
"""num_open_kprobes()
Get the number of open K[ret]probes. Can be useful for scenarios where
event_re is used while attaching and detaching probes.
"""
return len(self.kprobe_fds)
def num_open_uprobes(self):
"""num_open_uprobes()
Get the number of open U[ret]probes.
"""
return len(self.uprobe_fds)
def num_open_tracepoints(self):
"""num_open_tracepoints()
Get the number of open tracepoints.
"""
return len(self.tracepoint_fds)
def perf_buffer_poll(self, timeout = -1):
"""perf_buffer_poll(self)
Poll from all open perf ring buffers, calling the callback that was
provided when calling open_perf_buffer for each entry.
"""
readers = (ct.c_void_p * len(self.perf_buffers))()
for i, v in enumerate(self.perf_buffers.values()):
readers[i] = v
lib.perf_reader_poll(len(readers), readers, timeout)
def kprobe_poll(self, timeout = -1):
"""kprobe_poll(self)
Deprecated. Use perf_buffer_poll instead.
"""
self.perf_buffer_poll(timeout)
def free_bcc_memory(self):
return lib.bcc_free_memory()
@staticmethod
def add_module(modname):
"""add_module(modname)
Add a library or exe to buildsym cache
"""
try:
lib.bcc_buildsymcache_add_module(BPF._bsymcache, modname.encode())
except Exception as e:
print("Error adding module to build sym cache"+str(e))
def donothing(self):
"""the do nothing exit handler"""
def cleanup(self):
# Clean up opened probes
for k, v in list(self.kprobe_fds.items()):
self.detach_kprobe_event(k)
for k, v in list(self.uprobe_fds.items()):
self.detach_uprobe_event(k)
for k, v in list(self.tracepoint_fds.items()):
self.detach_tracepoint(k)
for k, v in list(self.raw_tracepoint_fds.items()):
self.detach_raw_tracepoint(k)
# Clean up opened perf ring buffer and perf events
table_keys = list(self.tables.keys())
for key in table_keys:
if isinstance(self.tables[key], PerfEventArray):
del self.tables[key]
for (ev_type, ev_config) in list(self.open_perf_events.keys()):
self.detach_perf_event(ev_type, ev_config)
if self.tracefile:
self.tracefile.close()
self.tracefile = None
if self.module:
lib.bpf_module_destroy(self.module)
self.module = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
from .usdt import USDT, USDTException
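
# Illustrative usage sketch (not part of the module): given a BPF C program
# `prog` whose handlers follow the "tracepoint__" / "raw_tracepoint__" naming
# convention handled above, and root privileges:
#
#     b = BPF(text=prog)   # handlers are auto-attached based on their name prefix
#     b.trace_print()      # stream formatted lines from the kernel trace_pipe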
|
|
#!/usr/bin/env python3
# testBoundLog.py
import os
import time
import unittest
from xlattice import HashTypes, check_hashtype
from upax.ftlog import BoundLog, FileReader, LogEntry, StringReader
class TestBoundLog(unittest.TestCase):
def setUp(self):
self.u_dir = "dev0/U"
self.path_to_log = "%s/L" % self.u_dir
if os.path.exists(self.path_to_log):
os.remove(self.path_to_log)
def tearDown(self):
pass
def get_good(self, hashtype):
if hashtype == HashTypes.SHA1:
goodkey_1 = '0123456789012345678901234567890123456789'
goodkey_2 = 'fedcba9876543210fedcba9876543210fedcba98'
goodkey_3 = '1234567890123456789012345678901234567890'
goodkey_4 = 'edcba9876543210fedcba9876543210fedcba98f'
goodkey_5 = '2345678901234567890123456789012345678901'
goodkey_6 = 'dcba9876543210fedcba9876543210fedcba98fe'
goodkey_7 = '3456789012345678901234567890123456789012'
goodkey_8 = 'cba9876543210fedcba9876543210fedcba98fed'
else:
# meaningless values, OK for sha2 or sha3
goodkey_1 = '0123456789012345678901234567890123' + \
'456789abcdef3330123456789abcde'
goodkey_2 = 'fedcba9876543210fedcba9876543210fe' + \
'dcba98012345678901234567890123'
goodkey_3 = '1234567890123456789012345678901234' + \
'567890abcdef697698768696969696'
goodkey_4 = 'edcba9876543210fedcba9876543210fed' + \
'cba98f012345678901234567890123'
goodkey_5 = '2345678901234567890123456789012345' + \
'678901654654647645647654754757'
goodkey_6 = 'dcba9876543210fedcba9876543210fedc' + \
'ba98fe453254323243253274754777'
goodkey_7 = '3456789012345678901234567890123456' + \
'789012abcdef696878687686999987'
goodkey_8 = 'cba9876543210fedcba9876543210fedcb' + \
'a98fedfedcab698687669676999988'
return (goodkey_1, goodkey_2, goodkey_3, goodkey_4,
goodkey_5, goodkey_6, goodkey_7, goodkey_8,)
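    # The SHA1 branch above uses 40-hex-character keys; the longer
    # 64-hex-character values stand in for SHA2/SHA3 digests.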
def do_test_log_without_entries(self, hashtype):
check_hashtype(hashtype)
(goodkey_1, goodkey_2, _, _, _, _, _, _) = self.get_good(hashtype)
time0 = 1000 * (int(time.time()) - 10000)
# the first line of an otherwise empty log file
empty_log = "%013u %s %s\n" % (time0, goodkey_1, goodkey_2)
reader = StringReader(empty_log, hashtype)
log = BoundLog(
reader,
hashtype,
self.u_dir,
'L') # will default to 'L'
assert log is not None
self.assertEqual(time0, log.timestamp)
self.assertEqual(goodkey_1, log.prev_hash)
self.assertEqual(goodkey_2, log.prev_master)
# only first line should appear, because there are no entries
expected = empty_log
actual = log.__str__()
self.assertEqual(expected, actual)
self.assertEqual(0, len(log))
# disk file must exist and must contain just the one line
path_to_log = "%s/L" % "dev0/U/"
assert os.path.exists(path_to_log)
contents = ''
with open(path_to_log, "r") as file:
contents = file.read()
self.assertEqual(empty_log, contents)
log.close()
def test_log_without_entries(self):
for using in HashTypes:
self.do_test_log_without_entries(using)
def setup_the_server(self, hashtype):
(goodkey_1, goodkey_2, goodkey_3, goodkey_4,
goodkey_5, goodkey_6, goodkey_7, goodkey_8,) = self.get_good(hashtype)
time0 = int(time.time()) - 10000
time1 = time0 + 100
time2 = time1 + 100
time3 = time2 + 100
entry1 = LogEntry(time1, goodkey_3, goodkey_4, 'jdd', 'e@document1')
entry2 = LogEntry(time2, goodkey_5, goodkey_6, 'jdd', 'e@document2')
entry3 = LogEntry(time3, goodkey_7, goodkey_8, 'jdd', 'e@document3')
empty_log = "%013u %s %s\n" % (time0, goodkey_1, goodkey_2)
log_w_three = empty_log + str(entry1) + str(entry2) + str(entry3)
return (time0, time1, time2, time3, entry1,
entry2, entry3, empty_log, log_w_three)
def do_test_multi_entry_log(self, hashtype):
check_hashtype(hashtype)
(goodkey_1, goodkey_2, goodkey_3, _,
goodkey_5, _, goodkey_7, _,) = self.get_good(hashtype)
(time0, time1, _, _, entry1, entry2, entry3, _,
log_w_three) = self.setup_the_server(hashtype)
reader = StringReader(log_w_three, hashtype)
log = BoundLog(reader, hashtype, self.u_dir, 'L')
assert log is not None
self.assertEqual(time0, log.timestamp)
self.assertEqual(goodkey_1, log.prev_hash)
self.assertEqual(goodkey_2, log.prev_master)
self.assertEqual(3, len(log))
self.assertTrue(goodkey_3 in log)
entry = log.get_entry(goodkey_3)
self.assertEqual(entry1, entry)
self.assertTrue(goodkey_5 in log)
entry = log.get_entry(goodkey_5)
self.assertEqual(entry2, entry)
self.assertTrue(goodkey_7 in log)
entry = log.get_entry(goodkey_7)
self.assertEqual(entry3, entry)
with open(self.path_to_log, 'r') as file:
log_contents = file.read()
self.assertEqual(log_w_three, log_contents)
log.close()
def test_multi_entry_log(self):
for using in HashTypes:
self.do_test_multi_entry_log(using)
def do_test_add_entry(self, hashtype):
check_hashtype(hashtype)
(goodkey_1, goodkey_2, goodkey_3, goodkey_4,
goodkey_5, goodkey_6, goodkey_7, goodkey_8,) = self.get_good(hashtype)
(time0, time1, time2, time3, entry1, entry2, entry3, empty_log,
log_w_three) = self.setup_the_server(hashtype)
reader = StringReader(empty_log, hashtype)
log = BoundLog(reader, hashtype, self.u_dir, 'L')
assert log is not None
self.assertEqual(time0, log.timestamp)
self.assertEqual(goodkey_1, log.prev_hash)
self.assertEqual(goodkey_2, log.prev_master)
self.assertEqual(0, len(log))
        # goodkey_3 is the content key, goodkey_4 the srcNodeID
log.add_entry(time1, goodkey_3, goodkey_4, 'jdd', 'e@document1')
self.assertEqual(1, len(log))
entry = log.get_entry(goodkey_3)
self.assertEqual(entry1, entry)
self.assertTrue(goodkey_3 in log)
self.assertFalse(goodkey_5 in log)
log.add_entry(time2, goodkey_5, goodkey_6, 'jdd', 'e@document2')
self.assertEqual(2, len(log))
entry = log.get_entry(goodkey_5)
self.assertEqual(entry2, entry)
self.assertTrue(goodkey_5 in log)
log.add_entry(time3, goodkey_7, goodkey_8, 'jdd', 'e@document3')
self.assertEqual(3, len(log))
entry = log.get_entry(goodkey_7)
self.assertEqual(entry3, entry)
self.assertTrue(goodkey_7 in log)
        log.close()  # without closing (which flushes the log to disk) the file check below fails
with open(self.path_to_log, 'r') as file:
log_contents = file.read()
self.assertEqual(log_w_three, log_contents)
def test_add_entry(self):
for using in HashTypes:
self.do_test_add_entry(using)
def do_test_with_opens_and_closes(self, hashtype):
check_hashtype(hashtype)
(goodkey_1, goodkey_2, goodkey_3, goodkey_4,
goodkey_5, goodkey_6, goodkey_7, goodkey_8,) = self.get_good(hashtype)
(time0, time1, time2, time3, entry1, entry2, entry3, empty_log,
log_w_three) = self.setup_the_server(hashtype)
reader = StringReader(empty_log, hashtype)
log = BoundLog(reader, hashtype, self.u_dir)
assert log is not None
self.assertEqual(time0, log.timestamp)
self.assertEqual(goodkey_1, log.prev_hash)
self.assertEqual(goodkey_2, log.prev_master)
self.assertEqual(0, len(log))
log.close()
reader = FileReader(self.u_dir, hashtype)
log = BoundLog(reader, hashtype)
log.add_entry(time1, goodkey_3, goodkey_4, 'jdd', 'e@document1')
self.assertEqual(1, len(log))
entry = log.get_entry(goodkey_3)
self.assertEqual(entry1, entry)
self.assertTrue(goodkey_3 in log)
self.assertFalse(goodkey_5 in log)
log.close()
reader = FileReader(self.u_dir, hashtype)
log = BoundLog(reader, hashtype)
log.add_entry(time2, goodkey_5, goodkey_6, 'jdd', 'e@document2')
self.assertEqual(2, len(log))
entry = log.get_entry(goodkey_5)
self.assertEqual(entry2, entry)
self.assertTrue(goodkey_5 in log)
log.close()
reader = FileReader(self.u_dir, hashtype)
log = BoundLog(reader, hashtype)
log.add_entry(time3, goodkey_7, goodkey_8, 'jdd', 'e@document3')
self.assertEqual(3, len(log))
entry = log.get_entry(goodkey_7)
self.assertEqual(entry3, entry)
self.assertTrue(goodkey_7 in log)
log.close()
with open(self.path_to_log, 'r') as file:
log_contents = file.read()
self.assertEqual(log_w_three, log_contents)
def test_with_opens_and_closes(self):
for hashtype in HashTypes:
self.do_test_with_opens_and_closes(hashtype)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
import logging
import unittest
import MySQLdb
import environment
import tablet
import utils
use_mysqlctld = True
tablet_master = tablet.Tablet(use_mysqlctld=use_mysqlctld)
tablet_replica1 = tablet.Tablet(use_mysqlctld=use_mysqlctld)
tablet_replica2 = tablet.Tablet(use_mysqlctld=use_mysqlctld)
setup_procs = []
def setUpModule():
try:
environment.topo_server().setup()
# start mysql instance external to the test
global setup_procs
setup_procs = [
tablet_master.init_mysql(),
tablet_replica1.init_mysql(),
tablet_replica2.init_mysql(),
]
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
tablet_replica1.wait_for_mysqlctl_socket()
tablet_replica2.wait_for_mysqlctl_socket()
else:
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
if use_mysqlctld:
# Try to terminate mysqlctld gracefully, so it kills its mysqld.
for proc in setup_procs:
utils.kill_sub_process(proc, soft=True)
teardown_procs = setup_procs
else:
teardown_procs = [
tablet_master.teardown_mysql(),
tablet_replica1.teardown_mysql(),
tablet_replica2.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
tablet_master.remove_tree()
tablet_replica1.remove_tree()
tablet_replica2.remove_tree()
class TestBackup(unittest.TestCase):
def setUp(self):
for t in tablet_master, tablet_replica1:
t.create_db('vt_test_keyspace')
tablet_master.init_tablet('master', 'test_keyspace', '0', start=True,
supports_backups=True)
tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True,
supports_backups=True)
utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
tablet_master.tablet_alias])
def tearDown(self):
for t in tablet_master, tablet_replica1:
t.kill_vttablet()
tablet.Tablet.check_vttablet_count()
environment.topo_server().wipe()
for t in [tablet_master, tablet_replica1, tablet_replica2]:
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
for backup in self._list_backups():
self._remove_backup(backup)
_create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
def _insert_data(self, t, index):
"""Add a single row with value 'index' to the given tablet."""
t.mquery(
'vt_test_keyspace',
"insert into vt_insert_test (msg) values ('test %s')" %
index, write=True)
def _check_data(self, t, count, msg):
"""Check that the specified tablet has the expected number of rows."""
timeout = 10
while True:
try:
result = t.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == count:
break
except MySQLdb.DatabaseError:
# ignore exceptions, we'll just timeout (the tablet creation
# can take some time to replicate, and we get a 'table vt_insert_test
        # does not exist' exception in some rare cases)
logging.exception('exception waiting for data to replicate')
timeout = utils.wait_step(msg, timeout)
def _restore(self, t):
"""Erase mysql/tablet dir, then start tablet with restore enabled."""
self._reset_tablet_dir(t)
t.start_vttablet(wait_for_state='SERVING',
init_tablet_type='replica',
init_keyspace='test_keyspace',
init_shard='0',
supports_backups=True)
def _reset_tablet_dir(self, t):
"""Stop mysql, delete everything including tablet dir, restart mysql."""
utils.wait_procs([t.teardown_mysql()])
t.remove_tree()
proc = t.init_mysql()
if use_mysqlctld:
t.wait_for_mysqlctl_socket()
else:
utils.wait_procs([proc])
def _list_backups(self):
"""Get a list of backup names for the test shard."""
backups, _ = utils.run_vtctl(tablet.get_backup_storage_flags() +
['ListBackups', 'test_keyspace/0'],
mode=utils.VTCTL_VTCTL, trap_output=True)
return backups.splitlines()
def _remove_backup(self, backup):
"""Remove a named backup from the test shard."""
utils.run_vtctl(
tablet.get_backup_storage_flags() +
['RemoveBackup', 'test_keyspace/0', backup],
auto_log=True, mode=utils.VTCTL_VTCTL)
def test_backup(self):
"""Test backup flow.
test_backup will:
- create a shard with master and replica1 only
- run InitShardMaster
- insert some data
- take a backup
- insert more data on the master
- bring up tablet_replica2 after the fact, let it restore the backup
- check all data is right (before+after backup data)
- list the backup, remove it
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2)
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# check that the backup shows up in the listing
backups = self._list_backups()
logging.debug('list of backups: %s', backups)
self.assertEqual(len(backups), 1)
self.assertTrue(backups[0].endswith(tablet_replica1.tablet_alias))
# remove the backup and check that the list is empty
self._remove_backup(backups[0])
backups = self._list_backups()
logging.debug('list of backups after remove: %s', backups)
self.assertEqual(len(backups), 0)
tablet_replica2.kill_vttablet()
def test_master_slave_same_backup(self):
"""Test a master and slave from the same backup.
Check that a slave and master both restored from the same backup
can replicate successfully.
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2)
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# Promote replica2 to master.
utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/0',
tablet_replica2.tablet_alias])
# insert more data on replica2 (current master)
self._insert_data(tablet_replica2, 3)
# Force replica1 to restore from backup.
tablet_replica1.kill_vttablet()
self._restore(tablet_replica1)
# wait for replica1 to catch up.
self._check_data(tablet_replica1, 3,
'replica1 getting data from restored master')
tablet_replica2.kill_vttablet()
def _restore_old_master_test(self, restore_method):
"""Test that a former master replicates correctly after being restored.
- Take a backup.
- Reparent from old master to new master.
- Force old master to restore from a previous backup using restore_method.
Args:
restore_method: function accepting one parameter of type tablet.Tablet,
this function is called to force a restore on the provided tablet
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# reparent to replica1
utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/0',
tablet_replica1.tablet_alias])
# insert more data on new master
self._insert_data(tablet_replica1, 3)
# force the old master to restore at the latest backup.
restore_method(tablet_master)
# wait for it to catch up.
self._check_data(tablet_master, 3, 'former master catches up after restore')
def test_restore_old_master(self):
def _restore_using_kill(t):
t.kill_vttablet()
self._restore(t)
self._restore_old_master_test(_restore_using_kill)
def test_in_place_restore(self):
def _restore_in_place(t):
utils.run_vtctl(['RestoreFromBackup', t.tablet_alias], auto_log=True)
self._restore_old_master_test(_restore_in_place)
if __name__ == '__main__':
utils.main()
|
|
import pytest
import re
from pynt import _pynt, main
import sys
if sys.version.startswith("3."):
from io import StringIO as SOut
else:
from StringIO import StringIO as SOut
import os
from os import path
import imp
def fpath(mod):
return path.splitext(mod.__file__)[0] + '.py'
def simulate_dynamic_module_load(mod):
file_path = fpath(mod)
#sys.path.append(path.abspath(script_dir))
module_name, suffix = path.splitext(path.basename(file_path))
description = (suffix, 'r', imp.PY_SOURCE)
with open(file_path, 'r') as scriptfile:
return imp.load_module(module_name, scriptfile, file_path, description)
def reset_build_file(mod):
mod.tasks_run = []
def build(mod, params=None, init_mod = reset_build_file):
dynamically_loaded_mod = simulate_dynamic_module_load(mod)
    init_mod(dynamically_loaded_mod)
sys.argv = ['pynt', '-f', fpath(mod)] + (params or [])
main()
return dynamically_loaded_mod
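# build() simulates a pynt command-line invocation: it re-imports the build
# script from disk, resets its recorded task runs, fakes the command line via
# sys.argv and then calls pynt's main(), returning the freshly loaded module
# so tests can inspect which tasks actually ran.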
class TestParseArgs:
def test_parsing_commandline(self):
args = _pynt._create_parser().parse_args(['-f', "foo.py", "task1", "task2"])
assert "foo.py" == args.file
assert not args.list_tasks
assert ['task1', 'task2'] == args.tasks
def test_parsing_commandline_help(self):
assert _pynt._create_parser().parse_args(["-l"]).list_tasks
assert _pynt._create_parser().parse_args([ "--list-tasks"]).list_tasks
def test_parsing_commandline_build_file(self):
assert "some_file" == _pynt._create_parser().parse_args(["-f", "some_file"]).file
assert "build.py" == _pynt._create_parser().parse_args([]).file
assert "/foo/bar" == _pynt._create_parser().parse_args(
["--file", "/foo/bar"]).file
with pytest.raises(SystemExit):
_pynt._create_parser().parse_args(["--file"])
with pytest.raises(SystemExit):
_pynt._create_parser().parse_args(["-f"])
class TestBuildSimple:
def test_get_tasks(self):
from .build_scripts import simple
ts = _pynt._get_tasks(simple)
assert len(ts) == 5
class TestBuildWithDependencies:
def test_get_tasks(self):
from .build_scripts import dependencies
tasks = _pynt._get_tasks(dependencies)#private tasks are not in this list
assert len(tasks) == 5
assert 4 == len([task for task in tasks if task.name == 'android'][0].dependencies)
assert 4 == len([task for task in tasks if task.name == 'ios'][0].dependencies)
def test_dependencies_for_imported(self):
from .build_scripts import default_task_and_import_dependencies
tasks = _pynt._get_tasks(default_task_and_import_dependencies)
assert 7 == len(tasks)
assert [task for task in tasks if task.name == 'clean']
assert [task for task in tasks if task.name == 'local_task']
assert [task for task in tasks if task.name == 'android']
assert 3 == len([task for task in tasks
if task.name == 'task_with_imported_dependencies'][0].dependencies)
def test_dependencies_that_are_imported_e2e(self):
from .build_scripts import default_task_and_import_dependencies
def mod_init(mod):
mod.tasks_run = []
mod.build_with_params.tasks_run = []
module = build(default_task_and_import_dependencies,
["task_with_imported_dependencies"], init_mod = mod_init)
assert module.tasks_run == ['local_task', 'task_with_imported_dependencies']
assert module.build_with_params.tasks_run == ['clean[/tmp]', 'html']
class TestDecorationValidation:
def test_task_without_braces(self):
with pytest.raises(Exception) as exc:
from .build_scripts import annotation_misuse_1
assert 'Replace use of @task with @task().' in str(exc.value)
def test_dependency_not_a_task(self):
with pytest.raises(Exception) as exc:
from .build_scripts import annotation_misuse_2
assert re.findall('function html.* is not a task.', str(exc.value))
def test_dependency_not_a_function(self):
with pytest.raises(Exception) as exc:
from .build_scripts import annotation_misuse_3
assert '1234 is not a task.' in str(exc.value)
import contextlib
@contextlib.contextmanager
def mock_stdout():
oldout, olderr = sys.stdout, sys.stderr
try:
out = [SOut(), SOut()]
sys.stdout, sys.stderr = out
yield out
finally:
sys.stdout, sys.stderr = oldout, olderr
out[0] = out[0].getvalue()
out[1] = out[1].getvalue()
class TestOptions:
@pytest.fixture
def module(self):
from .build_scripts import options as module
self.docs = {'clean': '', 'html': 'Generate HTML.',
'images': '''Prepare images.\n\nShould be ignored.''',
'android': 'Package Android app.'}
return module
def test_ignore_tasks(self, module):
module = build(module,["android"])
assert ['clean', 'html', 'android'] == module.tasks_run
def test_docs(self, module):
tasks = _pynt._get_tasks(module)
assert 4 == len(tasks)
for task_ in tasks:
assert task_.name in self.docs
assert self.docs[task_.name] == task_.doc
@pytest.mark.parametrize('args', [['-l'], ['--list-tasks'], []])
def test_list_docs(self, module, args):
with mock_stdout() as out:
build(module,args)
stdout = out[0]
tasks = _pynt._get_tasks(module)
for task in tasks:
if task.ignored:
                assert re.findall(r'%s\s+%s\s+%s' % (task.name, r"\[Ignored\]", task.doc), stdout)
            else:
                assert re.findall(r'%s\s+%s' % (task.name, task.doc), stdout)
class TestRuntimeError:
def test_stop_on_exception(self):
from .build_scripts import runtime_error as re
with pytest.raises(IOError):
build(re,["android"])
mod = simulate_dynamic_module_load(re)
assert mod.ran_images
assert not hasattr(mod, 'ran_android')
def test_exception_on_invalid_task_name(self):
from .build_scripts import build_with_params
with pytest.raises(Exception) as exc:
build(build_with_params,["doesnt_exist"])
assert 'task should be one of append_to_file, clean' \
', copy_file, echo, html, start_server, tests' in str(exc.value)
class TestPartialTaskNames:
def setup_method(self,method):
from .build_scripts import build_with_params
self._mod = build_with_params
def test_with_partial_name(self):
mod = build(self._mod, ["cl"])
assert ['clean[/tmp]'] == mod.tasks_run
def test_with_partial_name_and_dependencies(self):
mod = build(self._mod, ["htm"])
assert ['clean[/tmp]','html'] == mod.tasks_run
def test_exception_on_conflicting_partial_names(self):
with pytest.raises(Exception) as exc:
build(self._mod, ["c"])
assert ('Conflicting matches clean, copy_file for task c' in str(exc.value) or
'Conflicting matches copy_file, clean for task c' in str(exc.value))
class TestDefaultTask:
def test_simple_default_task(self):
from .build_scripts import simple
assert _pynt._run_default_task(simple) #returns false if no default task
def test_module_with_defaults_which_imports_other_files_with_defaults(self):
from .build_scripts import default_task_and_import_dependencies
mod = build(default_task_and_import_dependencies)
assert 'task_with_imported_dependencies' in mod.tasks_run
class TestMultipleTasks:
def setup_method(self,method):
from .build_scripts import build_with_params
self._mod = build_with_params
def test_dependency_is_run_only_once_unless_explicitly_invoked_again(self):
mod = build(self._mod, ["clean", "html", 'tests', "clean"])
assert ['clean[/tmp]', "html", "tests[]", "clean[/tmp]"] == mod.tasks_run
def test_multiple_partial_names(self):
assert ['clean[/tmp]', "html"] == build(self._mod, ["cl", "htm"]).tasks_run
class TesttaskArguments:
def setup_method(self,method):
from .build_scripts import build_with_params
self._mod = build_with_params
self._mod.tasks_run = []
def test_passing_optional_params_with_dependencies(self):
mod = build(self._mod, ["clean[~/project/foo]",
'append_to_file[/foo/bar,ABCDEF]',
"copy_file[/foo/bar,/foo/blah,False]",
'start_server[8080]'])
assert ["clean[~/project/foo]", 'append_to_file[/foo/bar,ABCDEF]',
"copy_file[/foo/bar,/foo/blah,False]", 'start_server[8080,True]'
] == mod.tasks_run
def test_invoking_varargs_task(self):
mod = build(self._mod, ['tests[test1,test2,test3]'])
assert ['tests[test1,test2,test3]'] == mod.tasks_run
def test_partial_name_with_args(self):
mod = build(self._mod, ['co[foo,bar]','star'])
assert ['clean[/tmp]','copy_file[foo,bar,True]', 'start_server[80,True]'
] == mod.tasks_run
def test_passing_keyword_args(self):
mod = build(self._mod, ['co[to=bar,from_=foo]','star[80,debug=False]', 'echo[foo=bar,blah=123]'])
assert ['clean[/tmp]','copy_file[foo,bar,True]',
'start_server[80,False]',
'echo[blah=123,foo=bar]'] == mod.tasks_run
def test_passing_varargs_and_keyword_args(self):
assert (['echo[1,2,3,some_str,111=333,bar=123.3,foo=xyz]']
==
build(self._mod,
['echo[1,2,3,some_str,111=333,foo=xyz,bar=123.3]']
).tasks_run)
def test_validate_keyword_arguments_always_after_args(self):
with pytest.raises(Exception) as exc:
build(self._mod, ['echo[bar=123.3,foo]'])
assert "Non keyword arg foo cannot follows" \
" a keyword arg bar=123.3" in str(exc.value)
with pytest.raises(Exception) as exc:
build(self._mod, ['copy[from_=/foo,/foo1]'])
assert "Non keyword arg /foo1 cannot follows" \
" a keyword arg from_=/foo" in str(exc.value)
def test_invalid_number_of_args(self):
with pytest.raises(TypeError) as exc:
build(self._mod, ['append[1,2,3]'])
print(str(exc.value))
assert re.findall('takes .*2 .*arguments', str(exc.value))
def test_invalid_names_for_kwargs(self):
with pytest.raises(TypeError) as exc:
build(self._mod, ['copy[1=2,to=bar]'])
assert "got an unexpected keyword argument '1'" in str(exc.value)
with pytest.raises(TypeError) as exc:
build(self._mod, ['copy[bar123=2]'])
assert "got an unexpected keyword argument 'bar123'" in str(exc.value)
class TesttaskLocalImports:
def setup_method(self,method):
from .build_scripts import build_with_local_import
self._mod = build_with_local_import
self._mod.tasks_run = []
def test_load_build_with_local_import_does_not_fail(self):
mod = build(self._mod, ["work"])
|
|
import os
import sys
import urllib.request, urllib.error, urllib.parse
import copy
import threading
import time
import math
import tempfile
import base64
import hashlib
import socket
import logging
from io import StringIO
import multiprocessing.dummy as multiprocessing
from ctypes import c_int
import json
import ssl
from . import utils
from .control_thread import ControlThread
from .download import download
__all__ = ['SmartDL', 'utils']
__version_major__ = 1
__version_minor__ = 3
__version_micro__ = 4
__version__ = "{}.{}.{}".format(__version_major__, __version_minor__, __version_micro__)
class HashFailedException(Exception):
"Raised when hash check fails."
def __init__(self, fn, calc_hash, needed_hash):
self.filename = fn
self.calculated_hash = calc_hash
self.needed_hash = needed_hash
def __str__(self):
return 'HashFailedException({}, got {}, expected {})'.format(self.filename, self.calculated_hash, self.needed_hash)
def __repr__(self):
return '<HashFailedException {}, got {}, expected {}>'.format(self.filename, self.calculated_hash, self.needed_hash)
class CanceledException(Exception):
"Raised when the job is canceled."
def __init__(self):
pass
def __str__(self):
return 'CanceledException'
def __repr__(self):
return "<CanceledException>"
class SmartDL:
'''
The main SmartDL class
:param urls: Download url. It is possible to pass unsafe and unicode characters. You can also pass a list of urls, and those will be used as mirrors.
:type urls: string or list of strings
:param dest: Destination path. Default is `%TEMP%/pySmartDL/`.
:type dest: string
:param progress_bar: If True, prints a progress bar to the `stdout stream <http://docs.python.org/2/library/sys.html#sys.stdout>`_. Default is `True`.
:type progress_bar: bool
:param fix_urls: If true, attempts to fix urls with unsafe characters.
:type fix_urls: bool
:param threads: Number of threads to use.
:type threads: int
:param timeout: Timeout for network operations, in seconds. Default is 5.
:type timeout: int
:param logger: An optional logger.
:type logger: `logging.Logger` instance
:param connect_default_logger: If true, connects a default logger to the class.
:type connect_default_logger: bool
:param request_args: Arguments to be passed to a new urllib.request.Request instance in dictionary form. See `urllib.request docs <https://docs.python.org/3/library/urllib.request.html#urllib.request.Request>`_ for options.
:type request_args: dict
:rtype: `SmartDL` instance
:param verify: If ssl certificates should be validated.
:type verify: bool
.. NOTE::
The provided dest may be a folder or a full path name (including filename). The workflow is:
* If the path exists, and it's an existing folder, the file will be downloaded to there with the original filename.
        * If the path does not exist, it will create the folders, if needed, and refer to the last section of the path as the filename.
* If you want to download to folder that does not exist at the moment, and want the module to fill in the filename, make sure the path ends with `os.sep`.
* If no path is provided, `%TEMP%/pySmartDL/` will be used.
'''
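    # A minimal usage sketch (illustrative; the URL and destination below are
    # placeholders, not part of the library):
    #
    #     obj = SmartDL("http://example.com/file.zip", dest="/tmp/", progress_bar=False)
    #     obj.start()                  # blocks until the download finishes by default
    #     if obj.isSuccessful():
    #         print(obj.get_dest())
    #     else:
    #         print(obj.get_errors())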
def __init__(self, urls, dest=None, progress_bar=True, fix_urls=True, threads=5, timeout=5, logger=None, connect_default_logger=False, request_args=None, verify=True):
if logger:
self.logger = logger
elif connect_default_logger:
self.logger = utils.create_debugging_logger()
else:
self.logger = utils.DummyLogger()
if request_args:
if "headers" not in request_args:
request_args["headers"] = dict()
self.requestArgs = request_args
else:
self.requestArgs = {"headers": dict()}
if "User-Agent" not in self.requestArgs["headers"]:
self.requestArgs["headers"]["User-Agent"] = utils.get_random_useragent()
self.mirrors = [urls] if isinstance(urls, str) else urls
if fix_urls:
self.mirrors = [utils.url_fix(x) for x in self.mirrors]
self.url = self.mirrors.pop(0)
self.logger.info('Using url "{}"'.format(self.url))
fn = urllib.parse.unquote(os.path.basename(urllib.parse.urlparse(self.url).path))
self.dest = dest or os.path.join(tempfile.gettempdir(), 'pySmartDL', fn)
if self.dest[-1] == os.sep:
if os.path.exists(self.dest[:-1]) and os.path.isfile(self.dest[:-1]):
os.unlink(self.dest[:-1])
self.dest += fn
if os.path.isdir(self.dest):
self.dest = os.path.join(self.dest, fn)
self.progress_bar = progress_bar
self.threads_count = threads
self.timeout = timeout
self.current_attemp = 1
self.attemps_limit = 4
self.minChunkFile = 1024**2*2 # 2MB
self.filesize = 0
self.shared_var = multiprocessing.Value(c_int, 0) # a ctypes var that counts the bytes already downloaded
self.thread_shared_cmds = {}
self.status = "ready"
self.verify_hash = False
self._killed = False
self._failed = False
self._start_func_blocking = True
self.errors = []
self.post_threadpool_thread = None
self.control_thread = None
if not os.path.exists(os.path.dirname(self.dest)):
self.logger.info('Folder "{}" does not exist. Creating...'.format(os.path.dirname(self.dest)))
os.makedirs(os.path.dirname(self.dest))
if not utils.is_HTTPRange_supported(self.url, timeout=self.timeout):
self.logger.warning("Server does not support HTTPRange. threads_count is set to 1.")
self.threads_count = 1
if os.path.exists(self.dest):
self.logger.warning('Destination "{}" already exists. Existing file will be removed.'.format(self.dest))
if not os.path.exists(os.path.dirname(self.dest)):
self.logger.warning('Directory "{}" does not exist. Creating it...'.format(os.path.dirname(self.dest)))
os.makedirs(os.path.dirname(self.dest))
self.logger.info("Creating a ThreadPool of {} thread(s).".format(self.threads_count))
self.pool = utils.ManagedThreadPoolExecutor(self.threads_count)
if verify:
self.context = None
else:
self.context = ssl.create_default_context()
self.context.check_hostname = False
self.context.verify_mode = ssl.CERT_NONE
def __str__(self):
return 'SmartDL(r"{}", dest=r"{}")'.format(self.url, self.dest)
def __repr__(self):
return "<SmartDL {}>".format(self.url)
def add_basic_authentication(self, username, password):
'''
Uses HTTP Basic Access authentication for the connection.
:param username: Username.
:type username: string
:param password: Password.
:type password: string
'''
auth_string = '{}:{}'.format(username, password)
base64string = base64.standard_b64encode(auth_string.encode('utf-8'))
self.requestArgs['headers']['Authorization'] = b"Basic " + base64string
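    # e.g. obj.add_basic_authentication("user", "s3cret") results in the request
    # header "Authorization: Basic dXNlcjpzM2NyZXQ=".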
def add_hash_verification(self, algorithm, hash):
'''
Adds hash verification to the download.
If hash is not correct, will try different mirrors. If all mirrors aren't
passing hash verification, `HashFailedException` Exception will be raised.
.. NOTE::
        If the downloaded file already exists at the destination and its hash matches, pySmartDL will not download it again.
.. WARNING::
The hashing algorithm must be supported on your system, as documented at `hashlib documentation page <http://docs.python.org/3/library/hashlib.html>`_.
:param algorithm: Hashing algorithm.
:type algorithm: string
:param hash: Hash code.
:type hash: string
'''
self.verify_hash = True
self.hash_algorithm = algorithm
self.hash_code = hash
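    # e.g. obj.add_hash_verification('sha256', expected_sha256_hex), where
    # expected_sha256_hex is a placeholder for the 64-character hex digest the
    # finished file should hash to.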
def fetch_hash_sums(self):
'''
Will attempt to fetch UNIX hash sums files (`SHA256SUMS`, `SHA1SUMS` or `MD5SUMS` files in
the same url directory).
        Calls `self.add_hash_verification` if successful. Returns True if a matching hash was found, False otherwise.
:rtype: bool
*New in 1.2.1*
'''
default_sums_filenames = ['SHA256SUMS', 'SHA1SUMS', 'MD5SUMS']
folder = os.path.dirname(self.url)
orig_basename = os.path.basename(self.url)
self.logger.info("Looking for SUMS files...")
for filename in default_sums_filenames:
try:
sums_url = "%s/%s" % (folder, filename)
sumsRequest = urllib.request.Request(sums_url, **self.requestArgs)
obj = urllib.request.urlopen(sumsRequest)
                data = obj.read().decode('utf-8').split('\n')
obj.close()
for line in data:
if orig_basename.lower() in line.lower():
self.logger.info("Found a matching hash in %s" % sums_url)
algo = filename.rstrip('SUMS')
hash = line.split(' ')[0]
self.add_hash_verification(algo, hash)
                        return True
            except urllib.error.HTTPError:
                continue
        return False
def start(self, blocking=None):
'''
        Starts the download task. Will raise `RuntimeError` if the object is already downloading.
.. warning::
If you're using the non-blocking mode, Exceptions won't be raised. In that case, call
`isSuccessful()` after the task is finished, to make sure the download succeeded. Call
        `get_errors()` to get the exceptions.
:param blocking: If true, calling this function will block the thread until the download finished. Default is *True*.
:type blocking: bool
'''
if not self.status == "ready":
raise RuntimeError("cannot start (current status is {})".format(self.status))
self.logger.info('Starting a new SmartDL operation.')
if blocking is None:
blocking = self._start_func_blocking
else:
self._start_func_blocking = blocking
if self.mirrors:
self.logger.info('One URL and {} mirrors are loaded.'.format(len(self.mirrors)))
else:
self.logger.info('One URL is loaded.')
if self.verify_hash and os.path.exists(self.dest):
if utils.get_file_hash(self.hash_algorithm, self.dest) == self.hash_code:
self.logger.info("Destination '%s' already exists, and the hash matches. No need to download." % self.dest)
self.status = 'finished'
return
self.logger.info("Downloading '{}' to '{}'...".format(self.url, self.dest))
req = urllib.request.Request(self.url, **self.requestArgs)
try:
urlObj = urllib.request.urlopen(req, timeout=self.timeout, context=self.context)
except (urllib.error.HTTPError, urllib.error.URLError, socket.timeout) as e:
self.errors.append(e)
if self.mirrors:
self.logger.info("{} Trying next mirror...".format(str(e)))
self.url = self.mirrors.pop(0)
self.logger.info('Using url "{}"'.format(self.url))
self.start(blocking)
return
else:
self.logger.warning(str(e))
self.errors.append(e)
self._failed = True
self.status = "finished"
raise
try:
self.filesize = int(urlObj.headers["Content-Length"])
self.logger.info("Content-Length is {} ({}).".format(self.filesize, utils.sizeof_human(self.filesize)))
except (IndexError, KeyError, TypeError):
self.logger.warning("Server did not send Content-Length. Filesize is unknown.")
self.filesize = 0
args = utils.calc_chunk_size(self.filesize, self.threads_count, self.minChunkFile)
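        # calc_chunk_size is assumed to return a list of (start, end) byte
        # ranges, one per worker thread; each range is handed to download() below.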
bytes_per_thread = args[0][1] - args[0][0] + 1
if len(args)>1:
self.logger.info("Launching {} threads (downloads {}/thread).".format(len(args), utils.sizeof_human(bytes_per_thread)))
else:
self.logger.info("Launching 1 thread (downloads {}).".format(utils.sizeof_human(bytes_per_thread)))
self.status = "downloading"
for i, arg in enumerate(args):
req = self.pool.submit(
download,
self.url,
self.dest+".%.3d" % i,
self.requestArgs,
self.context,
arg[0],
arg[1],
self.timeout,
self.shared_var,
self.thread_shared_cmds,
self.logger
)
self.post_threadpool_thread = threading.Thread(
target=post_threadpool_actions,
args=(
self.pool,
[[(self.dest+".%.3d" % i) for i in range(len(args))], self.dest],
self.filesize,
self
)
)
self.post_threadpool_thread.daemon = True
self.post_threadpool_thread.start()
self.control_thread = ControlThread(self)
if blocking:
self.wait(raise_exceptions=True)
def _exc_callback(self, req, e):
self.errors.append(e[0])
self.logger.exception(e[1])
def retry(self, eStr=""):
if self.current_attemp < self.attemps_limit:
self.current_attemp += 1
self.status = "ready"
self.shared_var.value = 0
self.thread_shared_cmds = {}
self.start()
else:
s = 'The maximum retry attempts reached'
if eStr:
s += " ({})".format(eStr)
self.errors.append(urllib.error.HTTPError(self.url, "0", s, {}, StringIO()))
self._failed = True
def try_next_mirror(self, e=None):
if self.mirrors:
if e:
self.errors.append(e)
self.status = "ready"
self.shared_var.value = 0
self.url = self.mirrors.pop(0)
self.logger.info('Using url "{}"'.format(self.url))
self.start()
else:
self._failed = True
self.errors.append(e)
def get_eta(self, human=False):
'''
Get estimated time of download completion, in seconds. Returns `0` if there is
        not enough data to calculate the estimated time (this typically happens
        during the first ~5 seconds of each download).
:param human: If true, returns a human-readable formatted string. Else, returns an int type number
:type human: bool
:rtype: int/string
'''
if human:
s = utils.time_human(self.control_thread.get_eta())
return s if s else "TBD"
return self.control_thread.get_eta()
def get_speed(self, human=False):
'''
Get current transfer speed in bytes per second.
:param human: If true, returns a human-readable formatted string. Else, returns an int type number
:type human: bool
:rtype: int/string
'''
if human:
return "{}/s".format(utils.sizeof_human(self.control_thread.get_speed()))
return self.control_thread.get_speed()
def get_progress(self):
'''
Returns the current progress of the download, as a float between `0` and `1`.
:rtype: float
'''
if not self.filesize:
return 0
if self.control_thread.get_dl_size() <= self.filesize:
return 1.0*self.control_thread.get_dl_size()/self.filesize
return 1.0
def get_progress_bar(self, length=20):
'''
Returns the current progress of the download as a string containing a progress bar.
.. NOTE::
That's an alias for pySmartDL.utils.progress_bar(obj.get_progress()).
:param length: The length of the progress bar in chars. Default is 20.
:type length: int
:rtype: string
'''
return utils.progress_bar(self.get_progress(), length)
def isFinished(self):
'''
        Returns whether the task is finished.
:rtype: bool
'''
if self.status == "ready":
return False
if self.status == "finished":
return True
return not self.post_threadpool_thread.is_alive()
def isSuccessful(self):
'''
        Returns whether the download was successful. It may fail in the following scenarios:
- Hash check is enabled and fails.
- All mirrors are down.
- Any local I/O problems (such as `no disk space available`).
.. NOTE::
Call `get_errors()` to get the exceptions, if any.
Will raise `RuntimeError` if it's called when the download task is not finished yet.
:rtype: bool
'''
if self._killed:
return False
n = 0
while self.status != 'finished':
n += 1
time.sleep(0.1)
if n >= 15:
raise RuntimeError("The download task must be finished in order to see if it's successful. (current status is {})".format(self.status))
return not self._failed
def get_errors(self):
'''
Get errors happened while downloading.
:rtype: list of `Exception` instances
'''
return self.errors
def get_status(self):
'''
Returns the current status of the task. Possible values: *ready*,
*downloading*, *paused*, *combining*, *finished*.
:rtype: string
'''
return self.status
def wait(self, raise_exceptions=False):
'''
Blocks until the download is finished.
:param raise_exceptions: If true, this function will raise exceptions. Default is *False*.
:type raise_exceptions: bool
'''
if self.status in ["ready", "finished"]:
return
while not self.isFinished():
time.sleep(0.1)
self.post_threadpool_thread.join()
self.control_thread.join()
if self._failed and raise_exceptions:
raise self.errors[-1]
def stop(self):
'''
Stops the download.
'''
if self.status == "downloading":
self.thread_shared_cmds['stop'] = ""
self._killed = True
def pause(self):
'''
Pauses the download.
'''
if self.status == "downloading":
self.status = "paused"
self.thread_shared_cmds['pause'] = ""
def resume(self):
'''
        Continues the download. Same as unpause().
'''
self.unpause()
def unpause(self):
'''
        Continues the download. Same as resume().
'''
if self.status == "paused" and 'pause' in self.thread_shared_cmds:
self.status = "downloading"
del self.thread_shared_cmds['pause']
def limit_speed(self, speed):
'''
Limits the download transfer speed.
        :param speed: Speed limit in bytes per second. A value of `0` pauses the download; negative values remove the limit. Default is `-1`.
:type speed: int
'''
if self.status == "downloading":
if speed == 0:
self.pause()
else:
self.unpause()
if speed > 0:
self.thread_shared_cmds['limit'] = speed/self.threads_count
elif 'limit' in self.thread_shared_cmds:
del self.thread_shared_cmds['limit']
def get_dest(self):
'''
        Get the destination path of the downloaded file. Useful when no
        destination was provided to the class and the file was saved to a temp folder.
:rtype: string
'''
return self.dest
def get_dl_time(self, human=False):
'''
        Returns how much time the download took, in seconds. Returns
        `-1` if the download task is not finished yet.
:param human: If true, returns a human-readable formatted string. Else, returns an int type number
:type human: bool
:rtype: int/string
'''
if not self.control_thread:
return 0
if human:
return utils.time_human(self.control_thread.get_dl_time())
return self.control_thread.get_dl_time()
def get_dl_size(self, human=False):
'''
        Get the number of bytes downloaded so far.
:param human: If true, returns a human-readable formatted string. Else, returns an int type number
:type human: bool
:rtype: int/string
'''
if not self.control_thread:
return 0
if human:
return utils.sizeof_human(self.control_thread.get_dl_size())
return self.control_thread.get_dl_size()
def get_final_filesize(self, human=False):
'''
Get total download size in bytes.
:param human: If true, returns a human-readable formatted string. Else, returns an int type number
:type human: bool
:rtype: int/string
'''
if not self.control_thread:
return 0
if human:
return utils.sizeof_human(self.control_thread.get_final_filesize())
return self.control_thread.get_final_filesize()
def get_data(self, binary=False, bytes=-1):
'''
Returns the downloaded data. Will raise `RuntimeError` if it's
called when the download task is not finished yet.
:param binary: If true, will read the data as binary. Else, will read it as text.
:type binary: bool
:param bytes: Number of bytes to read. Negative values will read until EOF. Default is `-1`.
:type bytes: int
:rtype: string
'''
if self.status != 'finished':
raise RuntimeError("The download task must be finished in order to read the data. (current status is %s)" % self.status)
flags = 'rb' if binary else 'r'
with open(self.get_dest(), flags) as f:
data = f.read(bytes) if bytes>0 else f.read()
return data
def get_data_hash(self, algorithm):
'''
Returns the downloaded data's hash. Will raise `RuntimeError` if it's
called when the download task is not finished yet.
:param algorithm: Hashing algorithm.
        :type algorithm: string
:rtype: string
.. WARNING::
The hashing algorithm must be supported on your system, as documented at `hashlib documentation page <http://docs.python.org/3/library/hashlib.html>`_.
'''
return hashlib.new(algorithm, self.get_data(binary=True)).hexdigest()
def get_json(self):
'''
Returns the JSON in the downloaded data. Will raise `RuntimeError` if it's
called when the download task is not finished yet. Will raise `json.decoder.JSONDecodeError`
if the downloaded data is not valid JSON.
:rtype: dict
'''
data = self.get_data()
return json.loads(data)
def post_threadpool_actions(pool, args, expected_filesize, SmartDLObj):
"Run function after thread pool is done. Run this in a thread."
while not pool.done():
time.sleep(0.1)
if SmartDLObj._killed:
return
if pool.get_exception():
for exc in pool.get_exceptions():
SmartDLObj.logger.exception(exc)
SmartDLObj.retry(str(pool.get_exception()))
if SmartDLObj._failed:
SmartDLObj.logger.warning("Task had errors. Exiting...")
return
if expected_filesize: # if not zero, expected filesize is known
threads = len(args[0])
total_filesize = sum([os.path.getsize(x) for x in args[0]])
diff = math.fabs(expected_filesize - total_filesize)
        # tolerate up to 4KB of extra data per thread (a thread may download a little more because of NTFS's block size)
if diff > 4*1024*threads:
            errMsg = 'Diff between downloaded files and expected filesizes is {}B (filesize: {}, expected_filesize: {}, {} threads).'.format(diff, total_filesize, expected_filesize, threads)
SmartDLObj.logger.warning(errMsg)
SmartDLObj.retry(errMsg)
return
SmartDLObj.status = "combining"
utils.combine_files(*args)
if SmartDLObj.verify_hash:
dest_path = args[-1]
hash_ = utils.get_file_hash(SmartDLObj.hash_algorithm, dest_path)
if hash_ == SmartDLObj.hash_code:
SmartDLObj.logger.info('Hash verification succeeded.')
else:
SmartDLObj.logger.warning('Hash verification failed.')
            SmartDLObj.try_next_mirror(HashFailedException(os.path.basename(dest_path), hash_, SmartDLObj.hash_code))
|
|
"""Test for RFLink cover components.
Test setup of RFLink covers component/platform. State tracking and
control of RFLink cover devices.
"""
import logging
from homeassistant.components.rflink import EVENT_BUTTON_PRESSED
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
STATE_CLOSED,
STATE_OPEN,
)
from homeassistant.core import CoreState, State, callback
from tests.common import mock_restore_cache
from tests.components.rflink.test_init import mock_rflink
DOMAIN = "cover"
CONFIG = {
"rflink": {
"port": "/dev/ttyABC0",
"ignore_devices": ["ignore_wildcard_*", "ignore_cover"],
},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "aliases": ["test_alias_0_0"]},
"cover_0_0": {"name": "dim_test"},
"cover_0_1": {"name": "cover_test"},
},
},
}
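# CONFIG wires up the rflink hub (serial port plus an ignore list) and three
# cover entities on the rflink platform; 'protocol_0_0' also carries an alias
# that the alias-handling tests below rely on.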
_LOGGER = logging.getLogger(__name__)
async def test_default_setup(hass, monkeypatch):
"""Test all basic functionality of the RFLink cover component."""
# setup mocking rflink module
event_callback, create, protocol, _ = await mock_rflink(
hass, CONFIG, DOMAIN, monkeypatch
)
# make sure arguments are passed
assert create.call_args_list[0][1]["ignore"]
# test default state of cover loaded from config
cover_initial = hass.states.get(f"{DOMAIN}.test")
assert cover_initial.state == STATE_CLOSED
assert cover_initial.attributes["assumed_state"]
# cover should follow state of the hardware device by interpreting
# incoming events for its name and aliases
# mock incoming command event for this device
event_callback({"id": "protocol_0_0", "command": "up"})
await hass.async_block_till_done()
cover_after_first_command = hass.states.get(f"{DOMAIN}.test")
assert cover_after_first_command.state == STATE_OPEN
    # not sure why, but covers always have assumed_state=True
assert cover_after_first_command.attributes.get("assumed_state")
# mock incoming command event for this device
event_callback({"id": "protocol_0_0", "command": "down"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == STATE_CLOSED
# should respond to group command
event_callback({"id": "protocol_0_0", "command": "allon"})
await hass.async_block_till_done()
cover_after_first_command = hass.states.get(f"{DOMAIN}.test")
assert cover_after_first_command.state == STATE_OPEN
# should respond to group command
event_callback({"id": "protocol_0_0", "command": "alloff"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == STATE_CLOSED
# test following aliases
# mock incoming command event for this device alias
event_callback({"id": "test_alias_0_0", "command": "up"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == STATE_OPEN
# test changing state from HA propagates to RFLink
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == STATE_CLOSED
assert protocol.send_command_ack.call_args_list[0][0][0] == "protocol_0_0"
assert protocol.send_command_ack.call_args_list[0][0][1] == "DOWN"
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == STATE_OPEN
assert protocol.send_command_ack.call_args_list[1][0][1] == "UP"
async def test_firing_bus_event(hass, monkeypatch):
"""Incoming RFLink command events should be put on the HA event bus."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {
"name": "test",
"aliases": ["test_alias_0_0"],
"fire_event": True,
}
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
calls = []
@callback
def listener(event):
calls.append(event)
hass.bus.async_listen_once(EVENT_BUTTON_PRESSED, listener)
# test event for new unconfigured sensor
event_callback({"id": "protocol_0_0", "command": "down"})
await hass.async_block_till_done()
await hass.async_block_till_done()
assert calls[0].data == {"state": "down", "entity_id": f"{DOMAIN}.test"}
async def test_signal_repetitions(hass, monkeypatch):
"""Command should be sent amount of configured repetitions."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"device_defaults": {"signal_repetitions": 3},
"devices": {
"protocol_0_0": {"name": "test", "signal_repetitions": 2},
"protocol_0_1": {"name": "test1"},
},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
# test if signal repetition is performed according to configuration
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
)
# wait for commands and repetitions to finish
await hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 2
    # test if defaults apply to configured devices
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.test1"}
)
)
# wait for commands and repetitions to finish
await hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 5
async def test_signal_repetitions_alternation(hass, monkeypatch):
"""Simultaneously switching entities must alternate repetitions."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "signal_repetitions": 2},
"protocol_0_1": {"name": "test1", "signal_repetitions": 2},
},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
)
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.test1"}
)
)
await hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[0][0][0] == "protocol_0_0"
assert protocol.send_command_ack.call_args_list[1][0][0] == "protocol_0_1"
assert protocol.send_command_ack.call_args_list[2][0][0] == "protocol_0_0"
assert protocol.send_command_ack.call_args_list[3][0][0] == "protocol_0_1"
async def test_signal_repetitions_cancelling(hass, monkeypatch):
"""Cancel outstanding repetitions when state changed."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {"protocol_0_0": {"name": "test", "signal_repetitions": 3}},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
)
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
)
await hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[0][0][1] == "DOWN"
assert protocol.send_command_ack.call_args_list[1][0][1] == "UP"
assert protocol.send_command_ack.call_args_list[2][0][1] == "UP"
assert protocol.send_command_ack.call_args_list[3][0][1] == "UP"
async def test_group_alias(hass, monkeypatch):
"""Group aliases should only respond to group commands (allon/alloff)."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "group_aliases": ["test_group_0_0"]}
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
assert hass.states.get(f"{DOMAIN}.test").state == STATE_CLOSED
# test sending group command to group alias
event_callback({"id": "test_group_0_0", "command": "allon"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == STATE_OPEN
    # test sending a regular (non-group) command to the group alias; it should be ignored
event_callback({"id": "test_group_0_0", "command": "down"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == STATE_OPEN
async def test_nogroup_alias(hass, monkeypatch):
"""Non group aliases should not respond to group commands."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {
"name": "test",
"nogroup_aliases": ["test_nogroup_0_0"],
}
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
assert hass.states.get(f"{DOMAIN}.test").state == STATE_CLOSED
# test sending group command to nogroup alias
event_callback({"id": "test_nogroup_0_0", "command": "allon"})
await hass.async_block_till_done()
# should not affect state
assert hass.states.get(f"{DOMAIN}.test").state == STATE_CLOSED
    # test sending a regular (non-group) command to the nogroup alias
event_callback({"id": "test_nogroup_0_0", "command": "up"})
await hass.async_block_till_done()
# should affect state
assert hass.states.get(f"{DOMAIN}.test").state == STATE_OPEN
async def test_nogroup_device_id(hass, monkeypatch):
"""Device id that do not respond to group commands (allon/alloff)."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {"test_nogroup_0_0": {"name": "test", "group": False}},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
assert hass.states.get(f"{DOMAIN}.test").state == STATE_CLOSED
# test sending group command to nogroup
event_callback({"id": "test_nogroup_0_0", "command": "allon"})
await hass.async_block_till_done()
# should not affect state
assert hass.states.get(f"{DOMAIN}.test").state == STATE_CLOSED
    # test sending a regular (non-group) command to the nogroup device
event_callback({"id": "test_nogroup_0_0", "command": "up"})
await hass.async_block_till_done()
# should affect state
assert hass.states.get(f"{DOMAIN}.test").state == STATE_OPEN
async def test_restore_state(hass, monkeypatch):
"""Ensure states are restored on startup."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"RTS_12345678_0": {"name": "c1"},
"test_restore_2": {"name": "c2"},
"test_restore_3": {"name": "c3"},
"test_restore_4": {"name": "c4"},
},
},
}
mock_restore_cache(
hass, (State(f"{DOMAIN}.c1", STATE_OPEN), State(f"{DOMAIN}.c2", STATE_CLOSED))
)
hass.state = CoreState.starting
# setup mocking rflink module
_, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
state = hass.states.get(f"{DOMAIN}.c1")
assert state
assert state.state == STATE_OPEN
state = hass.states.get(f"{DOMAIN}.c2")
assert state
assert state.state == STATE_CLOSED
state = hass.states.get(f"{DOMAIN}.c3")
assert state
assert state.state == STATE_CLOSED
    # covers without a cached state must fall back to the default (closed, assumed state)
state = hass.states.get(f"{DOMAIN}.c4")
assert state
assert state.state == STATE_CLOSED
assert state.attributes["assumed_state"]
# The platform checks the device ID: it uses the 'inverted' cover class
# when the ID starts with 'newkaku', unless an explicit 'type' is configured.
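# Summary of the command mapping asserted below (inferred from the assertions,
# not from the platform implementation itself): standard covers send UP on open
# and DOWN on close; inverted covers send DOWN on open and UP on close, while
# the Home Assistant state still reports open/closed as requested.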
async def test_inverted_cover(hass, monkeypatch):
"""Ensure states are restored on startup."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"nonkaku_device_1": {
"name": "nonkaku_type_standard",
"type": "standard",
},
"nonkaku_device_2": {"name": "nonkaku_type_none"},
"nonkaku_device_3": {
"name": "nonkaku_type_inverted",
"type": "inverted",
},
"newkaku_device_4": {
"name": "newkaku_type_standard",
"type": "standard",
},
"newkaku_device_5": {"name": "newkaku_type_none"},
"newkaku_device_6": {
"name": "newkaku_type_inverted",
"type": "inverted",
},
},
},
}
# setup mocking rflink module
event_callback, _, protocol, _ = await mock_rflink(
hass, config, DOMAIN, monkeypatch
)
# test default state of cover loaded from config
standard_cover = hass.states.get(f"{DOMAIN}.nonkaku_type_standard")
assert standard_cover.state == STATE_CLOSED
assert standard_cover.attributes["assumed_state"]
# mock incoming up command event for nonkaku_device_1
event_callback({"id": "nonkaku_device_1", "command": "up"})
await hass.async_block_till_done()
standard_cover = hass.states.get(f"{DOMAIN}.nonkaku_type_standard")
assert standard_cover.state == STATE_OPEN
assert standard_cover.attributes.get("assumed_state")
# mock incoming up command event for nonkaku_device_2
event_callback({"id": "nonkaku_device_2", "command": "up"})
await hass.async_block_till_done()
standard_cover = hass.states.get(f"{DOMAIN}.nonkaku_type_none")
assert standard_cover.state == STATE_OPEN
assert standard_cover.attributes.get("assumed_state")
# mock incoming up command event for nonkaku_device_3
event_callback({"id": "nonkaku_device_3", "command": "up"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.nonkaku_type_inverted")
assert inverted_cover.state == STATE_OPEN
assert inverted_cover.attributes.get("assumed_state")
# mock incoming up command event for newkaku_device_4
event_callback({"id": "newkaku_device_4", "command": "up"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_standard")
assert inverted_cover.state == STATE_OPEN
assert inverted_cover.attributes.get("assumed_state")
# mock incoming up command event for newkaku_device_5
event_callback({"id": "newkaku_device_5", "command": "up"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_none")
assert inverted_cover.state == STATE_OPEN
assert inverted_cover.attributes.get("assumed_state")
# mock incoming up command event for newkaku_device_6
event_callback({"id": "newkaku_device_6", "command": "up"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_inverted")
assert inverted_cover.state == STATE_OPEN
assert inverted_cover.attributes.get("assumed_state")
# mock incoming down command event for nonkaku_device_1
event_callback({"id": "nonkaku_device_1", "command": "down"})
await hass.async_block_till_done()
standard_cover = hass.states.get(f"{DOMAIN}.nonkaku_type_standard")
assert standard_cover.state == STATE_CLOSED
assert standard_cover.attributes.get("assumed_state")
# mock incoming down command event for nonkaku_device_2
event_callback({"id": "nonkaku_device_2", "command": "down"})
await hass.async_block_till_done()
standard_cover = hass.states.get(f"{DOMAIN}.nonkaku_type_none")
assert standard_cover.state == STATE_CLOSED
assert standard_cover.attributes.get("assumed_state")
# mock incoming down command event for nonkaku_device_3
event_callback({"id": "nonkaku_device_3", "command": "down"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.nonkaku_type_inverted")
assert inverted_cover.state == STATE_CLOSED
assert inverted_cover.attributes.get("assumed_state")
# mock incoming down command event for newkaku_device_4
event_callback({"id": "newkaku_device_4", "command": "down"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_standard")
assert inverted_cover.state == STATE_CLOSED
assert inverted_cover.attributes.get("assumed_state")
# mock incoming down command event for newkaku_device_5
event_callback({"id": "newkaku_device_5", "command": "down"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_none")
assert inverted_cover.state == STATE_CLOSED
assert inverted_cover.attributes.get("assumed_state")
# mock incoming down command event for newkaku_device_6
event_callback({"id": "newkaku_device_6", "command": "down"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_inverted")
assert inverted_cover.state == STATE_CLOSED
assert inverted_cover.attributes.get("assumed_state")
    # We only test the 'inverted' devices here; the 'standard' devices are
    # already covered by other test cases.
# should respond to group command
event_callback({"id": "nonkaku_device_3", "command": "alloff"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.nonkaku_type_inverted")
assert inverted_cover.state == STATE_CLOSED
# should respond to group command
event_callback({"id": "nonkaku_device_3", "command": "allon"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.nonkaku_type_inverted")
assert inverted_cover.state == STATE_OPEN
# should respond to group command
event_callback({"id": "newkaku_device_4", "command": "alloff"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_standard")
assert inverted_cover.state == STATE_CLOSED
# should respond to group command
event_callback({"id": "newkaku_device_4", "command": "allon"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_standard")
assert inverted_cover.state == STATE_OPEN
# should respond to group command
event_callback({"id": "newkaku_device_5", "command": "alloff"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_none")
assert inverted_cover.state == STATE_CLOSED
# should respond to group command
event_callback({"id": "newkaku_device_5", "command": "allon"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_none")
assert inverted_cover.state == STATE_OPEN
# should respond to group command
event_callback({"id": "newkaku_device_6", "command": "alloff"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_inverted")
assert inverted_cover.state == STATE_CLOSED
# should respond to group command
event_callback({"id": "newkaku_device_6", "command": "allon"})
await hass.async_block_till_done()
inverted_cover = hass.states.get(f"{DOMAIN}.newkaku_type_inverted")
assert inverted_cover.state == STATE_OPEN
# Sending the close command from HA should result
    # in a 'DOWN' command sent to a non-newkaku device
# that has its type set to 'standard'.
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: f"{DOMAIN}.nonkaku_type_standard"},
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.nonkaku_type_standard").state == STATE_CLOSED
assert protocol.send_command_ack.call_args_list[0][0][0] == "nonkaku_device_1"
assert protocol.send_command_ack.call_args_list[0][0][1] == "DOWN"
# Sending the open command from HA should result
# in an 'UP' command sent to a non-newkaku device
# that has its type set to 'standard'.
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: f"{DOMAIN}.nonkaku_type_standard"},
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.nonkaku_type_standard").state == STATE_OPEN
assert protocol.send_command_ack.call_args_list[1][0][0] == "nonkaku_device_1"
assert protocol.send_command_ack.call_args_list[1][0][1] == "UP"
# Sending the close command from HA should result
    # in a 'DOWN' command sent to a non-newkaku device
# that has its type not specified.
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.nonkaku_type_none"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.nonkaku_type_none").state == STATE_CLOSED
assert protocol.send_command_ack.call_args_list[2][0][0] == "nonkaku_device_2"
assert protocol.send_command_ack.call_args_list[2][0][1] == "DOWN"
# Sending the open command from HA should result
# in an 'UP' command sent to a non-newkaku device
# that has its type not specified.
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.nonkaku_type_none"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.nonkaku_type_none").state == STATE_OPEN
assert protocol.send_command_ack.call_args_list[3][0][0] == "nonkaku_device_2"
assert protocol.send_command_ack.call_args_list[3][0][1] == "UP"
# Sending the close command from HA should result
# in an 'UP' command sent to a non-newkaku device
# that has its type set to 'inverted'.
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: f"{DOMAIN}.nonkaku_type_inverted"},
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.nonkaku_type_inverted").state == STATE_CLOSED
assert protocol.send_command_ack.call_args_list[4][0][0] == "nonkaku_device_3"
assert protocol.send_command_ack.call_args_list[4][0][1] == "UP"
# Sending the open command from HA should result
    # in a 'DOWN' command sent to a non-newkaku device
# that has its type set to 'inverted'.
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: f"{DOMAIN}.nonkaku_type_inverted"},
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.nonkaku_type_inverted").state == STATE_OPEN
assert protocol.send_command_ack.call_args_list[5][0][0] == "nonkaku_device_3"
assert protocol.send_command_ack.call_args_list[5][0][1] == "DOWN"
# Sending the close command from HA should result
    # in a 'DOWN' command sent to a newkaku device
# that has its type set to 'standard'.
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: f"{DOMAIN}.newkaku_type_standard"},
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.newkaku_type_standard").state == STATE_CLOSED
assert protocol.send_command_ack.call_args_list[6][0][0] == "newkaku_device_4"
assert protocol.send_command_ack.call_args_list[6][0][1] == "DOWN"
# Sending the open command from HA should result
# in an 'UP' command sent to a newkaku device
# that has its type set to 'standard'.
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: f"{DOMAIN}.newkaku_type_standard"},
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.newkaku_type_standard").state == STATE_OPEN
assert protocol.send_command_ack.call_args_list[7][0][0] == "newkaku_device_4"
assert protocol.send_command_ack.call_args_list[7][0][1] == "UP"
# Sending the close command from HA should result
# in an 'UP' command sent to a newkaku device
# that has its type not specified.
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.newkaku_type_none"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.newkaku_type_none").state == STATE_CLOSED
assert protocol.send_command_ack.call_args_list[8][0][0] == "newkaku_device_5"
assert protocol.send_command_ack.call_args_list[8][0][1] == "UP"
# Sending the open command from HA should result
    # in a 'DOWN' command sent to a newkaku device
# that has its type not specified.
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f"{DOMAIN}.newkaku_type_none"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.newkaku_type_none").state == STATE_OPEN
assert protocol.send_command_ack.call_args_list[9][0][0] == "newkaku_device_5"
assert protocol.send_command_ack.call_args_list[9][0][1] == "DOWN"
# Sending the close command from HA should result
# in an 'UP' command sent to a newkaku device
# that has its type set to 'inverted'.
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: f"{DOMAIN}.newkaku_type_inverted"},
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.newkaku_type_inverted").state == STATE_CLOSED
assert protocol.send_command_ack.call_args_list[10][0][0] == "newkaku_device_6"
assert protocol.send_command_ack.call_args_list[10][0][1] == "UP"
# Sending the open command from HA should result
    # in a 'DOWN' command sent to a newkaku device
# that has its type set to 'inverted'.
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: f"{DOMAIN}.newkaku_type_inverted"},
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.newkaku_type_inverted").state == STATE_OPEN
assert protocol.send_command_ack.call_args_list[11][0][0] == "newkaku_device_6"
assert protocol.send_command_ack.call_args_list[11][0][1] == "DOWN"
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Interface implementation
"""
# pylint:disable=protected-access
import unittest
from zope.interface._compat import _skip_under_py3k
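# sentinel used to distinguish "no bases argument passed" from an explicit
# value in SpecificationTests._makeOne below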
_marker = object()
class Test_invariant(unittest.TestCase):
def test_w_single(self):
from zope.interface.interface import invariant
from zope.interface.interface import TAGGED_DATA
def _check(*args, **kw):
raise NotImplementedError()
class Foo(object):
invariant(_check)
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'invariants': [_check]})
def test_w_multiple(self):
from zope.interface.interface import invariant
from zope.interface.interface import TAGGED_DATA
def _check(*args, **kw):
raise NotImplementedError()
def _another_check(*args, **kw):
raise NotImplementedError()
class Foo(object):
invariant(_check)
invariant(_another_check)
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'invariants': [_check, _another_check]})
class Test_taggedValue(unittest.TestCase):
def test_w_single(self):
from zope.interface.interface import taggedValue
from zope.interface.interface import TAGGED_DATA
class Foo(object):
taggedValue('bar', ['baz'])
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'bar': ['baz']})
def test_w_multiple(self):
from zope.interface.interface import taggedValue
from zope.interface.interface import TAGGED_DATA
class Foo(object):
taggedValue('bar', ['baz'])
taggedValue('qux', 'spam')
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'bar': ['baz'], 'qux': 'spam'})
def test_w_multiple_overwriting(self):
from zope.interface.interface import taggedValue
from zope.interface.interface import TAGGED_DATA
class Foo(object):
taggedValue('bar', ['baz'])
taggedValue('qux', 'spam')
taggedValue('bar', 'frob')
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'bar': 'frob', 'qux': 'spam'})
class ElementTests(unittest.TestCase):
DEFAULT_NAME = 'AnElement'
def _getTargetClass(self):
from zope.interface.interface import Element
return Element
def _makeOne(self, name=None):
if name is None:
name = self.DEFAULT_NAME
return self._getTargetClass()(name)
def test_ctor_defaults(self):
element = self._makeOne()
self.assertEqual(element.__name__, self.DEFAULT_NAME)
self.assertEqual(element.getName(), self.DEFAULT_NAME)
self.assertEqual(element.__doc__, '')
self.assertEqual(element.getDoc(), '')
self.assertEqual(list(element.getTaggedValueTags()), [])
def test_ctor_no_doc_space_in_name(self):
element = self._makeOne('An Element')
self.assertEqual(element.__name__, None)
self.assertEqual(element.__doc__, 'An Element')
def test_getTaggedValue_miss(self):
element = self._makeOne()
self.assertRaises(KeyError, element.getTaggedValue, 'nonesuch')
def test_queryTaggedValue_miss(self):
element = self._makeOne()
self.assertEqual(element.queryTaggedValue('nonesuch'), None)
def test_queryTaggedValue_miss_w_default(self):
element = self._makeOne()
self.assertEqual(element.queryTaggedValue('nonesuch', 'bar'), 'bar')
def test_setTaggedValue(self):
element = self._makeOne()
element.setTaggedValue('foo', 'bar')
self.assertEqual(list(element.getTaggedValueTags()), ['foo'])
self.assertEqual(element.getTaggedValue('foo'), 'bar')
self.assertEqual(element.queryTaggedValue('foo'), 'bar')
class SpecificationBasePyTests(unittest.TestCase):
def _getTargetClass(self):
from zope.interface.interface import SpecificationBasePy
return SpecificationBasePy
def _makeOne(self):
return self._getTargetClass()()
def test_providedBy_miss(self):
from zope.interface import interface
from zope.interface.declarations import _empty
sb = self._makeOne()
def _providedBy(obj):
return _empty
with _Monkey(interface, providedBy=_providedBy):
self.assertFalse(sb.providedBy(object()))
def test_providedBy_hit(self):
from zope.interface import interface
sb = self._makeOne()
class _Decl(object):
_implied = {sb: {},}
def _providedBy(obj):
return _Decl()
with _Monkey(interface, providedBy=_providedBy):
self.assertTrue(sb.providedBy(object()))
def test_implementedBy_miss(self):
from zope.interface import interface
from zope.interface.declarations import _empty
sb = self._makeOne()
def _implementedBy(obj):
return _empty
with _Monkey(interface, implementedBy=_implementedBy):
self.assertFalse(sb.implementedBy(object()))
def test_implementedBy_hit(self):
from zope.interface import interface
sb = self._makeOne()
class _Decl(object):
_implied = {sb: {},}
def _implementedBy(obj):
return _Decl()
with _Monkey(interface, implementedBy=_implementedBy):
self.assertTrue(sb.implementedBy(object()))
def test_isOrExtends_miss(self):
sb = self._makeOne()
sb._implied = {} # not defined by SpecificationBasePy
self.assertFalse(sb.isOrExtends(object()))
def test_isOrExtends_hit(self):
sb = self._makeOne()
testing = object()
sb._implied = {testing: {}} # not defined by SpecificationBasePy
        self.assertTrue(sb.isOrExtends(testing))
def test___call___miss(self):
sb = self._makeOne()
sb._implied = {} # not defined by SpecificationBasePy
        self.assertFalse(sb(object()))
def test___call___hit(self):
sb = self._makeOne()
testing = object()
sb._implied = {testing: {}} # not defined by SpecificationBasePy
self.assertTrue(sb(testing))
class SpecificationBaseTests(unittest.TestCase):
def _getTargetClass(self):
from zope.interface.interface import SpecificationBase
return SpecificationBase
def test_optimizations(self):
from zope.interface.interface import SpecificationBasePy
try:
import zope.interface._zope_interface_coptimizations
except ImportError:
self.assertIs(self._getTargetClass(), SpecificationBasePy)
else:
self.assertIsNot(self._getTargetClass(), SpecificationBasePy)
class InterfaceBasePyTests(unittest.TestCase):
def _getTargetClass(self):
from zope.interface.interface import InterfaceBasePy
return InterfaceBasePy
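    # builds an InterfaceBasePy subclass whose providedBy() always returns
    # ``object_should_provide`` and whose __conform__ handling simply calls
    # conform(self)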
def _makeOne(self, object_should_provide):
class IB(self._getTargetClass()):
def _call_conform(self, conform):
return conform(self)
def providedBy(self, obj):
return object_should_provide
return IB()
def test___call___w___conform___returning_value(self):
ib = self._makeOne(False)
conformed = object()
class _Adapted(object):
def __conform__(self, iface):
return conformed
self.assertTrue(ib(_Adapted()) is conformed)
def test___call___w___conform___miss_ob_provides(self):
ib = self._makeOne(True)
class _Adapted(object):
def __conform__(self, iface):
return None
adapted = _Adapted()
self.assertTrue(ib(adapted) is adapted)
def test___call___wo___conform___ob_no_provides_w_alternate(self):
ib = self._makeOne(False)
adapted = object()
alternate = object()
self.assertTrue(ib(adapted, alternate) is alternate)
def test___call___w___conform___ob_no_provides_wo_alternate(self):
ib = self._makeOne(False)
adapted = object()
self.assertRaises(TypeError, ib, adapted)
def test___adapt___ob_provides(self):
ib = self._makeOne(True)
adapted = object()
self.assertTrue(ib.__adapt__(adapted) is adapted)
def test___adapt___ob_no_provides_uses_hooks(self):
from zope.interface import interface
ib = self._makeOne(False)
adapted = object()
_missed = []
def _hook_miss(iface, obj):
_missed.append((iface, obj))
return None
def _hook_hit(iface, obj):
return obj
with _Monkey(interface, adapter_hooks=[_hook_miss, _hook_hit]):
self.assertTrue(ib.__adapt__(adapted) is adapted)
self.assertEqual(_missed, [(ib, adapted)])
class InterfaceBaseTests(unittest.TestCase):
def _getTargetClass(self):
from zope.interface.interface import InterfaceBase
return InterfaceBase
def test_optimizations(self):
from zope.interface.interface import InterfaceBasePy
try:
import zope.interface._zope_interface_coptimizations
except ImportError:
self.assertIs(self._getTargetClass(), InterfaceBasePy)
else:
self.assertIsNot(self._getTargetClass(), InterfaceBasePy)
class SpecificationTests(unittest.TestCase):
def _getTargetClass(self):
from zope.interface.interface import Specification
return Specification
def _makeOne(self, bases=_marker):
if bases is _marker:
return self._getTargetClass()()
return self._getTargetClass()(bases)
def test_ctor(self):
from zope.interface.interface import Interface
spec = self._makeOne()
self.assertEqual(spec.__bases__, ())
self.assertEqual(len(spec._implied), 2)
self.assertTrue(spec in spec._implied)
self.assertTrue(Interface in spec._implied)
self.assertEqual(len(spec.dependents), 0)
def test_subscribe_first_time(self):
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
self.assertEqual(len(spec.dependents), 1)
self.assertEqual(spec.dependents[dep], 1)
def test_subscribe_again(self):
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
spec.subscribe(dep)
self.assertEqual(spec.dependents[dep], 2)
def test_unsubscribe_miss(self):
spec = self._makeOne()
dep = DummyDependent()
self.assertRaises(KeyError, spec.unsubscribe, dep)
def test_unsubscribe(self):
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
spec.subscribe(dep)
spec.unsubscribe(dep)
self.assertEqual(spec.dependents[dep], 1)
spec.unsubscribe(dep)
self.assertFalse(dep in spec.dependents)
def test___setBases_subscribes_bases_and_notifies_dependents(self):
from zope.interface.interface import Interface
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
class I(Interface):
pass
class J(Interface):
pass
spec.__bases__ = (I,)
self.assertEqual(dep._changed, [spec])
self.assertEqual(I.dependents[spec], 1)
spec.__bases__ = (J,)
self.assertEqual(I.dependents.get(spec), None)
self.assertEqual(J.dependents[spec], 1)
def test_changed_clears_volatiles_and_implied(self):
from zope.interface.interface import Interface
class I(Interface):
pass
spec = self._makeOne()
spec._v_attrs = 'Foo'
spec._implied[I] = ()
spec.changed(spec)
self.assertTrue(getattr(spec, '_v_attrs', self) is self)
self.assertFalse(I in spec._implied)
def test_interfaces_skips_already_seen(self):
from zope.interface.interface import Interface
class IFoo(Interface):
pass
spec = self._makeOne([IFoo, IFoo])
self.assertEqual(list(spec.interfaces()), [IFoo])
def test_extends_strict_wo_self(self):
from zope.interface.interface import Interface
class IFoo(Interface):
pass
spec = self._makeOne(IFoo)
self.assertFalse(spec.extends(IFoo, strict=True))
def test_extends_strict_w_self(self):
spec = self._makeOne()
self.assertFalse(spec.extends(spec, strict=True))
def test_extends_non_strict_w_self(self):
spec = self._makeOne()
self.assertTrue(spec.extends(spec, strict=False))
def test_get_hit_w__v_attrs(self):
spec = self._makeOne()
foo = object()
spec._v_attrs = {'foo': foo}
self.assertTrue(spec.get('foo') is foo)
def test_get_hit_from_base_wo__v_attrs(self):
from zope.interface.interface import Attribute
from zope.interface.interface import Interface
class IFoo(Interface):
foo = Attribute('foo')
class IBar(Interface):
bar = Attribute('bar')
spec = self._makeOne([IFoo, IBar])
self.assertTrue(spec.get('foo') is IFoo.get('foo'))
self.assertTrue(spec.get('bar') is IBar.get('bar'))
class InterfaceClassTests(unittest.TestCase):
def _getTargetClass(self):
from zope.interface.interface import InterfaceClass
return InterfaceClass
def _makeOne(self, name='ITest', bases=(), attrs=None, __doc__=None,
__module__=None):
return self._getTargetClass()(name, bases, attrs, __doc__, __module__)
def test_ctor_defaults(self):
klass = self._getTargetClass()
inst = klass('ITesting')
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(inst.getBases(), ())
def test_ctor_bad_bases(self):
klass = self._getTargetClass()
self.assertRaises(TypeError, klass, 'ITesting', (object(),))
def test_ctor_w_attrs_attrib_methods(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS)
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(inst.names(), ATTRS.keys())
def test_ctor_attrs_w___locals__(self):
ATTRS = {'__locals__': {}}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS)
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(inst.names(), ATTRS.keys())
def test_ctor_attrs_w__decorator_non_return(self):
from zope.interface.interface import _decorator_non_return
ATTRS = {'dropme': _decorator_non_return}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS)
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(list(inst.names()), [])
def test_ctor_attrs_w_invalid_attr_type(self):
from zope.interface.exceptions import InvalidInterface
ATTRS = {'invalid': object()}
klass = self._getTargetClass()
self.assertRaises(InvalidInterface, klass, 'ITesting', attrs=ATTRS)
def test_ctor_w_explicit___doc__(self):
ATTRS = {'__doc__': 'ATTR'}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS, __doc__='EXPLICIT')
self.assertEqual(inst.__doc__, 'EXPLICIT')
def test_interfaces(self):
iface = self._makeOne()
self.assertEqual(list(iface.interfaces()), [iface])
def test_getBases(self):
iface = self._makeOne()
sub = self._makeOne('ISub', bases=(iface,))
self.assertEqual(sub.getBases(), (iface,))
def test_isEqualOrExtendedBy_identity(self):
iface = self._makeOne()
self.assertTrue(iface.isEqualOrExtendedBy(iface))
def test_isEqualOrExtendedBy_subiface(self):
iface = self._makeOne()
sub = self._makeOne('ISub', bases=(iface,))
self.assertTrue(iface.isEqualOrExtendedBy(sub))
self.assertFalse(sub.isEqualOrExtendedBy(iface))
def test_isEqualOrExtendedBy_unrelated(self):
one = self._makeOne('One')
another = self._makeOne('Another')
self.assertFalse(one.isEqualOrExtendedBy(another))
self.assertFalse(another.isEqualOrExtendedBy(one))
def test_names_w_all_False_ignores_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.names(all=False)), ['baz'])
def test_names_w_all_True_no_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(sorted(one.names(all=True)), ['bar', 'foo'])
def test_names_w_all_True_w_bases_simple(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.names(all=True)), ['bar', 'baz', 'foo'])
def test_names_w_all_True_bases_w_same_names(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.names(all=True)), ['bar', 'baz', 'foo'])
def test___iter__(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived), ['bar', 'baz', 'foo'])
def test_namesAndDescriptions_w_all_False_ignores_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.namesAndDescriptions(all=False)),
[('baz', DERIVED_ATTRS['baz']),
])
def test_namesAndDescriptions_w_all_True_no_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
        self.assertEqual(sorted(one.namesAndDescriptions(all=True)),
[('bar', ATTRS['bar']),
('foo', ATTRS['foo']),
])
def test_namesAndDescriptions_w_all_True_simple(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.namesAndDescriptions(all=True)),
[('bar', BASE_ATTRS['bar']),
('baz', DERIVED_ATTRS['baz']),
('foo', BASE_ATTRS['foo']),
])
def test_namesAndDescriptions_w_all_True_bases_w_same_names(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.namesAndDescriptions(all=True)),
[('bar', BASE_ATTRS['bar']),
('baz', DERIVED_ATTRS['baz']),
('foo', DERIVED_ATTRS['foo']),
])
def test_getDescriptionFor_miss(self):
one = self._makeOne()
self.assertRaises(KeyError, one.getDescriptionFor, 'nonesuch')
def test_getDescriptionFor_hit(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(one.getDescriptionFor('foo'), ATTRS['foo'])
self.assertEqual(one.getDescriptionFor('bar'), ATTRS['bar'])
def test___getitem___miss(self):
one = self._makeOne()
def _test():
return one['nonesuch']
self.assertRaises(KeyError, _test)
def test___getitem___hit(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(one['foo'], ATTRS['foo'])
self.assertEqual(one['bar'], ATTRS['bar'])
def test___contains___miss(self):
one = self._makeOne()
self.assertFalse('nonesuch' in one)
def test___contains___hit(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertTrue('foo' in one)
self.assertTrue('bar' in one)
def test_direct_miss(self):
one = self._makeOne()
self.assertEqual(one.direct('nonesuch'), None)
def test_direct_hit_local_miss_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(derived.direct('foo'), DERIVED_ATTRS['foo'])
self.assertEqual(derived.direct('baz'), DERIVED_ATTRS['baz'])
self.assertEqual(derived.direct('bar'), None)
def test_queryDescriptionFor_miss(self):
iface = self._makeOne()
self.assertEqual(iface.queryDescriptionFor('nonesuch'), None)
def test_queryDescriptionFor_hit(self):
from zope.interface import Attribute
ATTRS = {'attr': Attribute('Title', 'Description')}
iface = self._makeOne(attrs=ATTRS)
self.assertEqual(iface.queryDescriptionFor('attr'), ATTRS['attr'])
def test_validateInvariants_pass(self):
_called_with = []
def _passable(*args, **kw):
_called_with.append((args, kw))
return True
iface = self._makeOne()
obj = object()
iface.setTaggedValue('invariants', [_passable])
self.assertEqual(iface.validateInvariants(obj), None)
self.assertEqual(_called_with, [((obj,), {})])
def test_validateInvariants_fail_wo_errors_passed(self):
from zope.interface.exceptions import Invalid
_passable_called_with = []
def _passable(*args, **kw):
_passable_called_with.append((args, kw))
return True
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
iface = self._makeOne()
obj = object()
iface.setTaggedValue('invariants', [_passable, _fail])
self.assertRaises(Invalid, iface.validateInvariants, obj)
self.assertEqual(_passable_called_with, [((obj,), {})])
self.assertEqual(_fail_called_with, [((obj,), {})])
def test_validateInvariants_fail_w_errors_passed(self):
from zope.interface.exceptions import Invalid
_errors = []
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
iface = self._makeOne()
obj = object()
iface.setTaggedValue('invariants', [_fail])
self.assertRaises(Invalid, iface.validateInvariants, obj, _errors)
self.assertEqual(_fail_called_with, [((obj,), {})])
self.assertEqual(len(_errors), 1)
self.assertTrue(isinstance(_errors[0], Invalid))
def test_validateInvariants_fail_in_base_wo_errors_passed(self):
from zope.interface.exceptions import Invalid
_passable_called_with = []
def _passable(*args, **kw):
_passable_called_with.append((args, kw))
return True
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
base = self._makeOne('IBase')
derived = self._makeOne('IDerived', (base,))
obj = object()
base.setTaggedValue('invariants', [_fail])
derived.setTaggedValue('invariants', [_passable])
self.assertRaises(Invalid, derived.validateInvariants, obj)
self.assertEqual(_passable_called_with, [((obj,), {})])
self.assertEqual(_fail_called_with, [((obj,), {})])
def test_validateInvariants_fail_in_base_w_errors_passed(self):
from zope.interface.exceptions import Invalid
_errors = []
_passable_called_with = []
def _passable(*args, **kw):
_passable_called_with.append((args, kw))
return True
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
base = self._makeOne('IBase')
derived = self._makeOne('IDerived', (base,))
obj = object()
base.setTaggedValue('invariants', [_fail])
derived.setTaggedValue('invariants', [_passable])
self.assertRaises(Invalid, derived.validateInvariants, obj, _errors)
self.assertEqual(_passable_called_with, [((obj,), {})])
self.assertEqual(_fail_called_with, [((obj,), {})])
self.assertEqual(len(_errors), 1)
self.assertTrue(isinstance(_errors[0], Invalid))
def test___reduce__(self):
iface = self._makeOne('PickleMe')
self.assertEqual(iface.__reduce__(), 'PickleMe')
def test___hash___normal(self):
iface = self._makeOne('HashMe')
self.assertEqual(hash(iface),
hash((('HashMe',
'zope.interface.tests.test_interface'))))
def test___hash___missing_required_attrs(self):
import warnings
from warnings import catch_warnings
class Derived(self._getTargetClass()):
def __init__(self):
pass # Don't call base class.
derived = Derived()
with catch_warnings(record=True) as warned:
warnings.simplefilter('always') # see LP #825249
self.assertEqual(hash(derived), 1)
self.assertEqual(len(warned), 1)
self.assertTrue(warned[0].category is UserWarning)
self.assertEqual(str(warned[0].message),
'Hashing uninitialized InterfaceClass instance')
def test_comparison_with_None(self):
iface = self._makeOne()
self.assertTrue(iface < None)
self.assertTrue(iface <= None)
self.assertFalse(iface == None)
self.assertTrue(iface != None)
self.assertFalse(iface >= None)
self.assertFalse(iface > None)
self.assertFalse(None < iface)
self.assertFalse(None <= iface)
self.assertFalse(None == iface)
self.assertTrue(None != iface)
self.assertTrue(None >= iface)
self.assertTrue(None > iface)
def test_comparison_with_same_instance(self):
iface = self._makeOne()
self.assertFalse(iface < iface)
self.assertTrue(iface <= iface)
self.assertTrue(iface == iface)
self.assertFalse(iface != iface)
self.assertTrue(iface >= iface)
self.assertFalse(iface > iface)
def test_comparison_with_same_named_instance_in_other_module(self):
one = self._makeOne('IName', __module__='zope.interface.tests.one')
other = self._makeOne('IName', __module__='zope.interface.tests.other')
self.assertTrue(one < other)
self.assertFalse(other < one)
self.assertTrue(one <= other)
self.assertFalse(other <= one)
self.assertFalse(one == other)
self.assertFalse(other == one)
self.assertTrue(one != other)
self.assertTrue(other != one)
self.assertFalse(one >= other)
self.assertTrue(other >= one)
self.assertFalse(one > other)
self.assertTrue(other > one)
class InterfaceTests(unittest.TestCase):
def test_attributes_link_to_interface(self):
from zope.interface import Interface
from zope.interface import Attribute
class I1(Interface):
attr = Attribute("My attr")
self.assertTrue(I1['attr'].interface is I1)
def test_methods_link_to_interface(self):
from zope.interface import Interface
class I1(Interface):
def method(foo, bar, bingo):
"A method"
self.assertTrue(I1['method'].interface is I1)
def test_classImplements_simple(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class ICurrent(Interface):
def method1(a, b):
pass
def method2(a, b):
pass
class IOther(Interface):
pass
class Current(object):
__implemented__ = ICurrent
def method1(self, a, b):
raise NotImplementedError()
def method2(self, a, b):
raise NotImplementedError()
current = Current()
self.assertTrue(ICurrent.implementedBy(Current))
self.assertFalse(IOther.implementedBy(Current))
self.assertTrue(ICurrent in implementedBy(Current))
self.assertFalse(IOther in implementedBy(Current))
self.assertTrue(ICurrent in providedBy(current))
self.assertFalse(IOther in providedBy(current))
def test_classImplements_base_not_derived(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
class Current():
__implemented__ = IBase
def method(self):
raise NotImplementedError()
current = Current()
self.assertTrue(IBase.implementedBy(Current))
self.assertFalse(IDerived.implementedBy(Current))
self.assertTrue(IBase in implementedBy(Current))
self.assertFalse(IDerived in implementedBy(Current))
self.assertTrue(IBase in providedBy(current))
self.assertFalse(IDerived in providedBy(current))
def test_classImplements_base_and_derived(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
class Current(object):
__implemented__ = IDerived
def method(self):
raise NotImplementedError()
current = Current()
self.assertTrue(IBase.implementedBy(Current))
self.assertTrue(IDerived.implementedBy(Current))
self.assertFalse(IBase in implementedBy(Current))
self.assertTrue(IBase in implementedBy(Current).flattened())
self.assertTrue(IDerived in implementedBy(Current))
self.assertFalse(IBase in providedBy(current))
self.assertTrue(IBase in providedBy(current).flattened())
self.assertTrue(IDerived in providedBy(current))
def test_classImplements_multiple(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class ILeft(Interface):
def method():
pass
class IRight(ILeft):
pass
class Left(object):
__implemented__ = ILeft
def method(self):
raise NotImplementedError()
class Right(object):
__implemented__ = IRight
class Ambi(Left, Right):
pass
ambi = Ambi()
self.assertTrue(ILeft.implementedBy(Ambi))
self.assertTrue(IRight.implementedBy(Ambi))
self.assertTrue(ILeft in implementedBy(Ambi))
self.assertTrue(IRight in implementedBy(Ambi))
self.assertTrue(ILeft in providedBy(ambi))
self.assertTrue(IRight in providedBy(ambi))
def test_classImplements_multiple_w_explict_implements(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class ILeft(Interface):
def method():
pass
class IRight(ILeft):
pass
class IOther(Interface):
pass
class Left():
__implemented__ = ILeft
def method(self):
raise NotImplementedError()
class Right(object):
__implemented__ = IRight
class Other(object):
__implemented__ = IOther
class Mixed(Left, Right):
__implemented__ = Left.__implemented__, Other.__implemented__
mixed = Mixed()
self.assertTrue(ILeft.implementedBy(Mixed))
self.assertFalse(IRight.implementedBy(Mixed))
self.assertTrue(IOther.implementedBy(Mixed))
self.assertTrue(ILeft in implementedBy(Mixed))
self.assertFalse(IRight in implementedBy(Mixed))
self.assertTrue(IOther in implementedBy(Mixed))
self.assertTrue(ILeft in providedBy(mixed))
self.assertFalse(IRight in providedBy(mixed))
self.assertTrue(IOther in providedBy(mixed))
def testInterfaceExtendsInterface(self):
from zope.interface import Interface
new = Interface.__class__
FunInterface = new('FunInterface')
BarInterface = new('BarInterface', [FunInterface])
BobInterface = new('BobInterface')
BazInterface = new('BazInterface', [BobInterface, BarInterface])
self.assertTrue(BazInterface.extends(BobInterface))
self.assertTrue(BazInterface.extends(BarInterface))
self.assertTrue(BazInterface.extends(FunInterface))
self.assertFalse(BobInterface.extends(FunInterface))
self.assertFalse(BobInterface.extends(BarInterface))
self.assertTrue(BarInterface.extends(FunInterface))
self.assertFalse(BarInterface.extends(BazInterface))
def test_verifyClass(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.verify import verifyClass
class ICheckMe(Interface):
attr = Attribute(u'My attr')
def method():
"A method"
class CheckMe(object):
__implemented__ = ICheckMe
attr = 'value'
def method(self):
raise NotImplementedError()
self.assertTrue(verifyClass(ICheckMe, CheckMe))
def test_verifyObject(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.verify import verifyObject
class ICheckMe(Interface):
attr = Attribute(u'My attr')
def method():
"A method"
class CheckMe(object):
__implemented__ = ICheckMe
attr = 'value'
def method(self):
raise NotImplementedError()
check_me = CheckMe()
self.assertTrue(verifyObject(ICheckMe, check_me))
def test_interface_object_provides_Interface(self):
from zope.interface import Interface
class AnInterface(Interface):
pass
self.assertTrue(Interface.providedBy(AnInterface))
def test_names_simple(self):
from zope.interface import Attribute
from zope.interface import Interface
class ISimple(Interface):
attr = Attribute(u'My attr')
def method():
pass
self.assertEqual(sorted(ISimple.names()), ['attr', 'method'])
def test_names_derived(self):
from zope.interface import Attribute
from zope.interface import Interface
class IBase(Interface):
attr = Attribute(u'My attr')
def method():
pass
class IDerived(IBase):
attr2 = Attribute(u'My attr2')
def method():
pass
def method2():
pass
self.assertEqual(sorted(IDerived.names()),
['attr2', 'method', 'method2'])
self.assertEqual(sorted(IDerived.names(all=True)),
['attr', 'attr2', 'method', 'method2'])
def test_namesAndDescriptions_simple(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
class ISimple(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
name_values = sorted(ISimple.namesAndDescriptions())
self.assertEqual(len(name_values), 2)
self.assertEqual(name_values[0][0], 'attr')
self.assertTrue(isinstance(name_values[0][1], Attribute))
self.assertEqual(name_values[0][1].__name__, 'attr')
self.assertEqual(name_values[0][1].__doc__, 'My attr')
self.assertEqual(name_values[1][0], 'method')
self.assertTrue(isinstance(name_values[1][1], Method))
self.assertEqual(name_values[1][1].__name__, 'method')
self.assertEqual(name_values[1][1].__doc__, 'My method')
def test_namesAndDescriptions_derived(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.interface import Method
class IBase(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(u'My attr2')
def method():
"My method, overridden"
def method2():
"My method2"
name_values = sorted(IDerived.namesAndDescriptions())
self.assertEqual(len(name_values), 3)
self.assertEqual(name_values[0][0], 'attr2')
self.assertTrue(isinstance(name_values[0][1], Attribute))
self.assertEqual(name_values[0][1].__name__, 'attr2')
self.assertEqual(name_values[0][1].__doc__, 'My attr2')
self.assertEqual(name_values[1][0], 'method')
self.assertTrue(isinstance(name_values[1][1], Method))
self.assertEqual(name_values[1][1].__name__, 'method')
self.assertEqual(name_values[1][1].__doc__, 'My method, overridden')
self.assertEqual(name_values[2][0], 'method2')
self.assertTrue(isinstance(name_values[2][1], Method))
self.assertEqual(name_values[2][1].__name__, 'method2')
self.assertEqual(name_values[2][1].__doc__, 'My method2')
name_values = sorted(IDerived.namesAndDescriptions(all=True))
self.assertEqual(len(name_values), 4)
self.assertEqual(name_values[0][0], 'attr')
self.assertTrue(isinstance(name_values[0][1], Attribute))
self.assertEqual(name_values[0][1].__name__, 'attr')
self.assertEqual(name_values[0][1].__doc__, 'My attr')
self.assertEqual(name_values[1][0], 'attr2')
self.assertTrue(isinstance(name_values[1][1], Attribute))
self.assertEqual(name_values[1][1].__name__, 'attr2')
self.assertEqual(name_values[1][1].__doc__, 'My attr2')
self.assertEqual(name_values[2][0], 'method')
self.assertTrue(isinstance(name_values[2][1], Method))
self.assertEqual(name_values[2][1].__name__, 'method')
self.assertEqual(name_values[2][1].__doc__, 'My method, overridden')
self.assertEqual(name_values[3][0], 'method2')
self.assertTrue(isinstance(name_values[3][1], Method))
self.assertEqual(name_values[3][1].__name__, 'method2')
self.assertEqual(name_values[3][1].__doc__, 'My method2')
def test_getDescriptionFor_nonesuch_no_default(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertRaises(KeyError, IEmpty.getDescriptionFor, 'nonesuch')
def test_getDescriptionFor_simple(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
class ISimple(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
a_desc = ISimple.getDescriptionFor('attr')
self.assertTrue(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = ISimple.getDescriptionFor('method')
self.assertTrue(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method')
def test_getDescriptionFor_derived(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
class IBase(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(u'My attr2')
def method():
"My method, overridden"
def method2():
"My method2"
a_desc = IDerived.getDescriptionFor('attr')
self.assertTrue(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = IDerived.getDescriptionFor('method')
self.assertTrue(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method, overridden')
a2_desc = IDerived.getDescriptionFor('attr2')
self.assertTrue(isinstance(a2_desc, Attribute))
self.assertEqual(a2_desc.__name__, 'attr2')
self.assertEqual(a2_desc.__doc__, 'My attr2')
m2_desc = IDerived.getDescriptionFor('method2')
self.assertTrue(isinstance(m2_desc, Method))
self.assertEqual(m2_desc.__name__, 'method2')
self.assertEqual(m2_desc.__doc__, 'My method2')
def test___getitem__nonesuch(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertRaises(KeyError, IEmpty.__getitem__, 'nonesuch')
def test___getitem__simple(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
class ISimple(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
a_desc = ISimple['attr']
self.assertTrue(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = ISimple['method']
self.assertTrue(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method')
def test___getitem___derived(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
class IBase(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(u'My attr2')
def method():
"My method, overridden"
def method2():
"My method2"
a_desc = IDerived['attr']
self.assertTrue(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = IDerived['method']
self.assertTrue(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method, overridden')
a2_desc = IDerived['attr2']
self.assertTrue(isinstance(a2_desc, Attribute))
self.assertEqual(a2_desc.__name__, 'attr2')
self.assertEqual(a2_desc.__doc__, 'My attr2')
m2_desc = IDerived['method2']
self.assertTrue(isinstance(m2_desc, Method))
self.assertEqual(m2_desc.__name__, 'method2')
self.assertEqual(m2_desc.__doc__, 'My method2')
def test___contains__nonesuch(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertFalse('nonesuch' in IEmpty)
def test___contains__simple(self):
from zope.interface import Attribute
from zope.interface import Interface
class ISimple(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
self.assertTrue('attr' in ISimple)
self.assertTrue('method' in ISimple)
def test___contains__derived(self):
from zope.interface import Attribute
from zope.interface import Interface
class IBase(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(u'My attr2')
def method():
"My method, overridden"
def method2():
"My method2"
self.assertTrue('attr' in IDerived)
self.assertTrue('method' in IDerived)
self.assertTrue('attr2' in IDerived)
self.assertTrue('method2' in IDerived)
def test___iter__empty(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertEqual(list(IEmpty), [])
def test___iter__simple(self):
from zope.interface import Attribute
from zope.interface import Interface
class ISimple(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
self.assertEqual(sorted(list(ISimple)), ['attr', 'method'])
def test___iter__derived(self):
from zope.interface import Attribute
from zope.interface import Interface
class IBase(Interface):
attr = Attribute(u'My attr')
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(u'My attr2')
def method():
"My method, overridden"
def method2():
"My method2"
self.assertEqual(sorted(list(IDerived)),
['attr', 'attr2', 'method', 'method2'])
def test_function_attributes_become_tagged_values(self):
from zope.interface import Interface
class ITagMe(Interface):
def method():
pass
method.optional = 1
method = ITagMe['method']
self.assertEqual(method.getTaggedValue('optional'), 1)
def test___doc___non_element(self):
from zope.interface import Interface
class IHaveADocString(Interface):
"xxx"
self.assertEqual(IHaveADocString.__doc__, "xxx")
self.assertEqual(list(IHaveADocString), [])
def test___doc___as_element(self):
from zope.interface import Attribute
from zope.interface import Interface
class IHaveADocString(Interface):
"xxx"
__doc__ = Attribute('the doc')
self.assertEqual(IHaveADocString.__doc__, "")
self.assertEqual(list(IHaveADocString), ['__doc__'])
def _errorsEqual(self, has_invariant, error_len, error_msgs, iface):
from zope.interface.exceptions import Invalid
self.assertRaises(Invalid, iface.validateInvariants, has_invariant)
e = []
try:
iface.validateInvariants(has_invariant, e)
self.fail("validateInvariants should always raise")
except Invalid as error:
self.assertEqual(error.args[0], e)
self.assertEqual(len(e), error_len)
msgs = [error.args[0] for error in e]
msgs.sort()
for msg in msgs:
self.assertEqual(msg, error_msgs.pop(0))
def test_invariant_simple(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import invariant
class IInvariant(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
invariant(_ifFooThenBar)
class HasInvariant(object):
pass
# set up
has_invariant = HasInvariant()
directlyProvides(has_invariant, IInvariant)
# the tests
self.assertEqual(IInvariant.getTaggedValue('invariants'),
[_ifFooThenBar])
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
has_invariant.bar = 27
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
has_invariant.foo = 42
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
del has_invariant.bar
self._errorsEqual(has_invariant, 1, ['If Foo, then Bar!'],
IInvariant)
def test_invariant_nested(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import invariant
class IInvariant(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
invariant(_ifFooThenBar)
class ISubInvariant(IInvariant):
invariant(_barGreaterThanFoo)
class HasInvariant(object):
pass
# nested interfaces with invariants:
self.assertEqual(ISubInvariant.getTaggedValue('invariants'),
[_barGreaterThanFoo])
has_invariant = HasInvariant()
directlyProvides(has_invariant, ISubInvariant)
has_invariant.foo = 42
# even though the interface has changed, we should still only have one
# error.
self._errorsEqual(has_invariant, 1, ['If Foo, then Bar!'],
ISubInvariant)
# however, if we give foo and bar truthy values where bar is not greater
# than foo, we'll get the error from the subclass invariant instead
has_invariant.foo = 2
has_invariant.bar = 1
self._errorsEqual(has_invariant, 1,
['Please, Boo MUST be greater than Foo!'],
ISubInvariant)
# and if we set foo to a positive number and bar to 0, we'll
# get both errors!
has_invariant.foo = 1
has_invariant.bar = 0
self._errorsEqual(has_invariant, 2,
['If Foo, then Bar!',
'Please, Boo MUST be greater than Foo!'],
ISubInvariant)
# for a happy ending, we'll make the invariants happy
has_invariant.foo = 1
has_invariant.bar = 2
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
def test_invariant_mutandis(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import invariant
class IInvariant(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
invariant(_ifFooThenBar)
class HasInvariant(object):
pass
# now we'll do two invariants on the same interface,
# just to make sure that a small
# multi-invariant interface is at least minimally tested.
has_invariant = HasInvariant()
directlyProvides(has_invariant, IInvariant)
has_invariant.foo = 42
# if you really need to mutate, then this would be the way to do it.
# Probably a bad idea, though. :-)
old_invariants = IInvariant.getTaggedValue('invariants')
invariants = old_invariants[:]
invariants.append(_barGreaterThanFoo)
IInvariant.setTaggedValue('invariants', invariants)
# even though the interface has changed, we should still only have one
# error.
self._errorsEqual(has_invariant, 1, ['If Foo, then Bar!'],
IInvariant)
# however, if we give foo and bar truthy values where bar is not greater
# than foo, we'll get the error from the newly added invariant instead
has_invariant.foo = 2
has_invariant.bar = 1
self._errorsEqual(has_invariant, 1,
['Please, Boo MUST be greater than Foo!'], IInvariant)
# and if we set foo to a positive number and bar to 0, we'll
# get both errors!
has_invariant.foo = 1
has_invariant.bar = 0
self._errorsEqual(has_invariant, 2,
['If Foo, then Bar!',
'Please, Boo MUST be greater than Foo!'],
IInvariant)
# for another happy ending, we'll make the invariants happy again
has_invariant.foo = 1
has_invariant.bar = 2
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
# clean up
IInvariant.setTaggedValue('invariants', old_invariants)
def test___doc___element(self):
from zope.interface import Interface
from zope.interface import Attribute
class I(Interface):
"xxx"
self.assertEqual(I.__doc__, "xxx")
self.assertEqual(list(I), [])
class I(Interface):
"xxx"
__doc__ = Attribute('the doc')
self.assertEqual(I.__doc__, "")
self.assertEqual(list(I), ['__doc__'])
@_skip_under_py3k
def testIssue228(self):
# Test for http://collector.zope.org/Zope3-dev/228
# Old style classes don't have a '__class__' attribute
# No old style classes in Python 3, so the test becomes moot.
import sys
from zope.interface import Interface
class I(Interface):
"xxx"
class OldStyle:
__providedBy__ = None
self.assertRaises(AttributeError, I.providedBy, OldStyle)
def test_invariant_as_decorator(self):
from zope.interface import Interface
from zope.interface import Attribute
from zope.interface import implementer
from zope.interface import invariant
from zope.interface.exceptions import Invalid
class IRange(Interface):
min = Attribute("Lower bound")
max = Attribute("Upper bound")
@invariant
def range_invariant(ob):
if ob.max < ob.min:
raise Invalid('max < min')
@implementer(IRange)
class Range(object):
def __init__(self, min, max):
self.min, self.max = min, max
IRange.validateInvariants(Range(1,2))
IRange.validateInvariants(Range(1,1))
try:
IRange.validateInvariants(Range(2,1))
except Invalid as e:
self.assertEqual(str(e), 'max < min')
else:
self.fail('validateInvariants should raise Invalid when max < min')
def test_taggedValue(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import taggedValue
class ITagged(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
taggedValue('qux', 'Spam')
class HasInvariant(object):
pass
self.assertEqual(ITagged.getTaggedValue('qux'), 'Spam')
self.assertTrue('qux' in ITagged.getTaggedValueTags())
def test_description_cache_management(self):
# See https://bugs.launchpad.net/zope.interface/+bug/185974
# There was a bug where the cache used by Specification.get() was not
# cleared when the bases were changed.
from zope.interface import Interface
from zope.interface import Attribute
class I1(Interface):
a = Attribute('a')
class I2(I1):
pass
class I3(I2):
pass
self.assertTrue(I3.get('a') is I1.get('a'))
I2.__bases__ = (Interface,)
self.assertTrue(I3.get('a') is None)
def test___call___defers_to___conform___(self):
from zope.interface import Interface
from zope.interface import implementer
class I(Interface):
pass
@implementer(I)
class C(object):
def __conform__(self, proto):
return 0
self.assertEqual(I(C()), 0)
def test___call___object_implements(self):
from zope.interface import Interface
from zope.interface import implementer
class I(Interface):
pass
@implementer(I)
class C(object):
pass
c = C()
self.assertTrue(I(c) is c)
def test___call___miss_wo_alternate(self):
from zope.interface import Interface
class I(Interface):
pass
class C(object):
pass
c = C()
self.assertRaises(TypeError, I, c)
def test___call___miss_w_alternate(self):
from zope.interface import Interface
class I(Interface):
pass
class C(object):
pass
c = C()
self.assertTrue(I(c, self) is self)
def test___call___w_adapter_hook(self):
from zope.interface import Interface
from zope.interface.interface import adapter_hooks
old_hooks = adapter_hooks[:]
def _miss(iface, obj):
pass
def _hit(iface, obj):
return self
class I(Interface):
pass
class C(object):
pass
c = C()
old_adapter_hooks = adapter_hooks[:]
adapter_hooks[:] = [_miss, _hit]
try:
self.assertTrue(I(c) is self)
finally:
adapter_hooks[:] = old_adapter_hooks
class AttributeTests(ElementTests):
DEFAULT_NAME = 'TestAttribute'
def _getTargetClass(self):
from zope.interface.interface import Attribute
return Attribute
class MethodTests(AttributeTests):
DEFAULT_NAME = 'TestMethod'
def _getTargetClass(self):
from zope.interface.interface import Method
return Method
def test_optional_as_property(self):
method = self._makeOne()
self.assertEqual(method.optional, {})
method.optional = {'foo': 'bar'}
self.assertEqual(method.optional, {'foo': 'bar'})
del method.optional
self.assertEqual(method.optional, {})
def test___call___raises_BrokenImplementation(self):
from zope.interface.exceptions import BrokenImplementation
method = self._makeOne()
try:
method()
except BrokenImplementation as e:
self.assertEqual(e.interface, None)
self.assertEqual(e.name, self.DEFAULT_NAME)
else:
self.fail('__call__ should raise BrokenImplementation')
def test_getSignatureInfo_bare(self):
method = self._makeOne()
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_getSignatureString_bare(self):
method = self._makeOne()
self.assertEqual(method.getSignatureString(), '()')
def test_getSignatureString_w_only_required(self):
method = self._makeOne()
method.positional = method.required = ['foo']
self.assertEqual(method.getSignatureString(), '(foo)')
def test_getSignatureString_w_optional(self):
method = self._makeOne()
method.positional = method.required = ['foo']
method.optional = {'foo': 'bar'}
self.assertEqual(method.getSignatureString(), "(foo='bar')")
def test_getSignatureString_w_varargs(self):
method = self._makeOne()
method.varargs = 'args'
self.assertEqual(method.getSignatureString(), "(*args)")
def test_getSignatureString_w_kwargs(self):
method = self._makeOne()
method.kwargs = 'kw'
self.assertEqual(method.getSignatureString(), "(**kw)")
class Test_fromFunction(unittest.TestCase):
def _callFUT(self, *args, **kw):
from zope.interface.interface import fromFunction
return fromFunction(*args, **kw)
def test_bare(self):
def _func():
"DOCSTRING"
method = self._callFUT(_func)
self.assertEqual(method.getName(), '_func')
self.assertEqual(method.getDoc(), 'DOCSTRING')
self.assertEqual(method.interface, None)
self.assertEqual(list(method.getTaggedValueTags()), [])
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_interface(self):
from zope.interface.interface import InterfaceClass
class IFoo(InterfaceClass):
pass
def _func():
"DOCSTRING"
method = self._callFUT(_func, interface=IFoo)
self.assertEqual(method.interface, IFoo)
def test_w_name(self):
def _func():
"DOCSTRING"
method = self._callFUT(_func, name='anotherName')
self.assertEqual(method.getName(), 'anotherName')
def test_w_only_required(self):
def _func(foo):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo'])
self.assertEqual(list(info['required']), ['foo'])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_optional(self):
def _func(foo='bar'):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo'])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {'foo': 'bar'})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_optional_self(self):
# XXX This is a weird case, trying to cover the following code in
# FUT::
#
# nr = na-len(defaults)
# if nr < 0:
# defaults=defaults[-nr:]
# nr = 0
def _func(self='bar'):
"DOCSTRING"
method = self._callFUT(_func, imlevel=1)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_varargs(self):
def _func(*args):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], 'args')
self.assertEqual(info['kwargs'], None)
def test_w_kwargs(self):
def _func(**kw):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], 'kw')
def test_full_spectrum(self):
def _func(foo, bar='baz', *args, **kw):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo', 'bar'])
self.assertEqual(list(info['required']), ['foo'])
self.assertEqual(info['optional'], {'bar': 'baz'})
self.assertEqual(info['varargs'], 'args')
self.assertEqual(info['kwargs'], 'kw')
class Test_fromMethod(unittest.TestCase):
def _callFUT(self, *args, **kw):
from zope.interface.interface import fromMethod
return fromMethod(*args, **kw)
def test_no_args(self):
class Foo(object):
def bar(self):
"DOCSTRING"
method = self._callFUT(Foo.bar)
self.assertEqual(method.getName(), 'bar')
self.assertEqual(method.getDoc(), 'DOCSTRING')
self.assertEqual(method.interface, None)
self.assertEqual(list(method.getTaggedValueTags()), [])
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_full_spectrum(self):
class Foo(object):
def bar(self, foo, bar='baz', *args, **kw):
"DOCSTRING"
method = self._callFUT(Foo.bar)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo', 'bar'])
self.assertEqual(list(info['required']), ['foo'])
self.assertEqual(info['optional'], {'bar': 'baz'})
self.assertEqual(info['varargs'], 'args')
self.assertEqual(info['kwargs'], 'kw')
def test_w_non_method(self):
def foo():
"DOCSTRING"
method = self._callFUT(foo)
self.assertEqual(method.getName(), 'foo')
self.assertEqual(method.getDoc(), 'DOCSTRING')
self.assertEqual(method.interface, None)
self.assertEqual(list(method.getTaggedValueTags()), [])
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
class DummyDependent(object):
def __init__(self):
self._changed = []
def changed(self, originally_changed):
self._changed.append(originally_changed)
def _barGreaterThanFoo(obj):
from zope.interface.exceptions import Invalid
foo = getattr(obj, 'foo', None)
bar = getattr(obj, 'bar', None)
if foo is not None and isinstance(foo, type(bar)):
# type checking should be handled elsewhere (like, say,
# schema); these invariants should be intra-interface
# constraints. This is a hacky way to do it, maybe, but you
# get the idea
if not bar > foo:
raise Invalid('Please, Boo MUST be greater than Foo!')
def _ifFooThenBar(obj):
from zope.interface.exceptions import Invalid
if getattr(obj, 'foo', None) and not getattr(obj, 'bar', None):
raise Invalid('If Foo, then Bar!')
class _Monkey(object):
# context-manager for replacing module names in the scope of a test.
def __init__(self, module, **kw):
self.module = module
self.to_restore = dict([(key, getattr(module, key)) for key in kw])
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
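# Illustrative usage of _Monkey (hypothetical module/attribute names; assumes
# the module already defines `helper`):
#
#     import some_module
#     with _Monkey(some_module, helper=lambda: 42):
#         assert some_module.helper() == 42
#     # on exit, some_module.helper is restored to its original value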
|
|
# Copyright (C) 2007 Alexandre Conrad, alexandre (dot) conrad (at) gmail (dot) com
#
# This module is part of FormAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import cgi
import warnings
import logging
logger = logging.getLogger('formalchemy.' + __name__)
MIN_SA_VERSION = '0.4.5'
from sqlalchemy import __version__
# compare version components numerically; a plain string comparison would
# mis-order versions such as '0.10' vs '0.4.5'
if tuple(int(x) for x in __version__.split('.') if x.isdigit()) < tuple(int(x) for x in MIN_SA_VERSION.split('.')):
raise ImportError('Version %s or later of SQLAlchemy required' % MIN_SA_VERSION)
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.properties import SynonymProperty
from sqlalchemy.orm import compile_mappers, object_session, class_mapper
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.scoping import ScopedSession
from sqlalchemy.orm.dynamic import DynamicAttributeImpl
from sqlalchemy.util import OrderedDict
try:
from sqlalchemy.orm.exc import UnmappedInstanceError
except ImportError:
class UnmappedInstanceError(Exception):
"""
Exception to provide support for sqlalchemy < 0.6
"""
import fields, fatypes
compile_mappers() # initializes InstrumentedAttributes
try:
# 0.5
from sqlalchemy.orm.attributes import manager_of_class
def _get_attribute(cls, p):
manager = manager_of_class(cls)
return manager[p.key]
except ImportError:
# 0.4
def _get_attribute(cls, p):
return getattr(cls, p.key)
def prettify(text):
"""
Turn an attribute name into something prettier, for a default label where none is given.
>>> prettify("my_column_name")
'My column name'
"""
return text.replace("_", " ").capitalize()
class SimpleMultiDict(dict):
"""
Adds `getone`, `getall` methods to dict. Assumes that values are either
a string or a list of strings.
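Illustrative usage (values behave as described above):
>>> d = SimpleMultiDict({'name': 'Bob', 'tags': ['a', 'b']})
>>> d.getone('name')
'Bob'
>>> d.getall('tags')
['a', 'b']
>>> d.getall('missing')
[]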
"""
def getone(self, key):
if key not in self:
raise KeyError(key)
v = dict.get(self, key)
if v is None or isinstance(v, basestring) or isinstance(v, cgi.FieldStorage):
return v
return v[0]
def getall(self, key):
v = dict.get(self, key)
if v is None:
return []
elif isinstance(v, basestring):
return [v]
return v
class ModelRenderer(object):
"""
The `ModelRenderer` class is the superclass for all classes needing to deal
with `model` access and supporting rendering capabilities.
"""
prettify = staticmethod(prettify)
def __init__(self, model, session=None, data=None, prefix=None):
"""
- `model`:
a SQLAlchemy mapped class or instance. New object creation
should be done by passing the class, which will need a default
(no-parameter) constructor. After construction or binding of
the :class:`~formalchemy.forms.FieldSet`, the instantiated object will be available as
the `.model` attribute.
- `session=None`:
the session to use for queries (for relations). If `model` is associated
with a session, that will be used by default. (Objects mapped with a
`scoped_session
<http://www.sqlalchemy.org/docs/05/session.html#contextual-thread-local-sessions>`_
will always have a session. Other objects will
also have a session if they were loaded by a Query.)
- `data=None`:
dictionary-like object of user-submitted data to validate and/or
sync to the `model`. Scalar attributes should have a single
value in the dictionary; multi-valued relations should have a
list, even if there are zero or one values submitted. Currently,
pylons request.params() objects and plain dictionaries are known
to work.
- `prefix=None`:
the prefix to prepend to html name attributes. This is useful to avoid
field name conflicts when there are two fieldsets creating objects
from the same model in one html page. (This is not needed when
editing existing objects, since the object primary key is used as part
of the field name.)
Only the `model` parameter is required.
After binding, :class:`~formalchemy.forms.FieldSet`'s `model` attribute will always be an instance.
If you bound to a class, `FormAlchemy` will call its constructor with no
arguments to create an appropriate instance.
.. NOTE::
This instance will not be added to the current session, even if you are using `Session.mapper`.
All of these parameters may be overridden by the `bind` or `rebind`
methods. The `bind` method returns a new instance bound as specified,
while `rebind` modifies the current :class:`~formalchemy.forms.FieldSet` and has
no return value. (You may not `bind` to a different type of SQLAlchemy
model than the initial one -- if you initially bind to a `User`, you
must subsequently bind `User`'s to that :class:`~formalchemy.forms.FieldSet`.)
Typically, you will configure a :class:`~formalchemy.forms.FieldSet` once in
your common form library, then `bind` specific instances later for editing. (The
`bind` method is thread-safe; `rebind` is not.) Thus:
load stuff:
>>> from formalchemy.tests import FieldSet, User, session
now, in `library.py`
>>> fs = FieldSet(User)
>>> fs.configure(options=[]) # put all configuration stuff here
and in `controller.py`
>>> from library import fs
>>> user = session.query(User).first()
>>> fs2 = fs.bind(user)
>>> html = fs2.render()
The `render_fields` attribute is an OrderedDict of all the `Field`'s
that have been configured, keyed by name. The order of the fields
is the order in `include`, or the order they were declared
in the SQLAlchemy model class if no `include` is specified.
The `_fields` attribute is an OrderedDict of all the `Field`'s
the ModelRenderer knows about, keyed by name, in their
unconfigured state. You should not normally need to access
`_fields` directly.
(Note that although equivalent `Field`'s (fields referring to
the same attribute on the SQLAlchemy model) will equate with
the == operator, they are NOT necessarily the same `Field`
instance. Stick to referencing `Field`'s from their parent
`FieldSet` to always get the "right" instance.)
"""
self._fields = OrderedDict()
self._render_fields = OrderedDict()
self.model = self.session = None
self.prefix = prefix
if not model:
raise Exception('model parameter may not be None')
self._original_cls = isinstance(model, type) and model or type(model)
ModelRenderer.rebind(self, model, session, data)
cls = isinstance(self.model, type) and self.model or type(self.model)
try:
class_mapper(cls)
except:
# this class is not managed by SA. extract any raw Fields defined on it.
keys = cls.__dict__.keys()
keys.sort(lambda a, b: cmp(a.lower(), b.lower())) # 2.3 support
for key in keys:
field = cls.__dict__[key]
if isinstance(field, fields.Field):
if field.name and field.name != key:
raise Exception('Fields in a non-mapped class must have the same name as the attribute they are assigned to. Do not manually give them a different name.')
field.name = field.key = key
self.append(field)
if not self._fields:
raise Exception("not bound to a SA instance, and no manual Field definitions found")
else:
# SA class.
# load synonyms so we can ignore them
synonyms = set(p for p in class_mapper(cls).iterate_properties
if isinstance(p, SynonymProperty))
# load discriminators so we can ignore them
discs = set(p for p in class_mapper(cls).iterate_properties
if hasattr(p, '_is_polymorphic_discriminator')
and p._is_polymorphic_discriminator)
# attributes we're interested in
attrs = []
for p in class_mapper(cls).iterate_properties:
attr = _get_attribute(cls, p)
if ((isinstance(p, SynonymProperty) or attr.property.key not in (s.name for s in synonyms))
and not isinstance(attr.impl, DynamicAttributeImpl)
and p not in discs):
attrs.append(attr)
# sort relations last before storing in the OrderedDict
L = [fields.AttributeField(attr, self) for attr in attrs]
L.sort(lambda a, b: cmp(a.is_relation, b.is_relation)) # note, key= not used for 2.3 support
self._fields.update((field.key, field) for field in L)
def append(self, field):
"""Add a form Field. By default, this Field will be included in the rendered form or table."""
if not isinstance(field, fields.Field) and not isinstance(field, fields.AttributeField):
raise ValueError('Can only add Field or AttributeField objects; got %s instead' % field)
field.parent = self
_fields = self._render_fields or self._fields
_fields[field.name] = field
def add(self, field):
warnings.warn(DeprecationWarning('FieldSet.add is deprecated. Use FieldSet.append instead. Your validator will break in FA 1.5'))
self.append(field)
def extend(self, fields):
"""Add a list of fields. By default, each Field will be included in the
rendered form or table."""
for field in fields:
self.append(field)
def insert(self, field, new_field):
"""Insert a new field *before* an existing field.
This works like ``list.insert()``: the new field is placed at the position
of the existing field, pushing that field and everything after it forward.
"""
fields_ = self._render_fields or self._fields
if not isinstance(new_field, fields.Field):
raise ValueError('Can only add Field objects; got %s instead' % new_field)
if isinstance(field, fields.AbstractField):
try:
index = fields_.keys().index(field.name)
except ValueError:
raise ValueError('%s not in fields' % field.name)
else:
raise TypeError('field must be a Field. Got %r' % field)
new_field.parent = self
items = list(fields_.iteritems()) # prepare for Python 3
items.insert(index, (new_field.name, new_field))
if self._render_fields:
self._render_fields = OrderedDict(items)
else:
self._fields = OrderedDict(items)
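# Illustrative usage (assumes a bound FieldSet `fs` with an `email` field and a
# manually created Field named `nickname`):
#
#     fs.insert(fs.email, nickname)   # `nickname` now renders just before `email`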
def insert_after(self, field, new_field):
"""Insert a new field *after* an existing field.
Use this if your business logic requires adding the new field after a
certain field rather than before it.
"""
fields_ = self._render_fields or self._fields
if not isinstance(new_field, fields.Field):
raise ValueError('Can only add Field objects; got %s instead' % new_field)
if isinstance(field, fields.AbstractField):
try:
index = fields_.keys().index(field.name)
except ValueError:
raise ValueError('%s not in fields' % field.name)
else:
raise TypeError('field must be a Field. Got %r' % field)
new_field.parent = self
items = list(fields_.iteritems())
new_item = (new_field.name, new_field)
if index + 1 == len(items): # after the last element ?
items.append(new_item)
else:
items.insert(index + 1, new_item)
if self._render_fields:
self._render_fields = OrderedDict(items)
else:
self._fields = OrderedDict(items)
@property
def render_fields(self):
"""
The set of attributes that will be rendered, as an (ordered)
dict of `{fieldname: Field}` pairs
"""
if not self._render_fields:
self._render_fields = OrderedDict([(field.key, field) for field in self._get_fields()])
return self._render_fields
def configure(self, pk=False, exclude=[], include=[], options=[]):
"""
The `configure` method specifies a set of attributes to be rendered.
By default, all attributes are rendered except primary keys and
foreign keys. But, relations `based on` foreign keys `will` be
rendered. For example, if an `Order` has a `user_id` FK and a `user`
relation based on it, `user` will be rendered (as a select box of
`User`'s, by default) but `user_id` will not.
Parameters:
* `pk=False`:
set to True to include primary key columns
* `exclude=[]`:
an iterable of attributes to exclude. Other attributes will
be rendered normally
* `include=[]`:
an iterable of attributes to include. Other attributes will
not be rendered
* `options=[]`:
an iterable of modified attributes. The set of attributes to
be rendered is unaffected
* `global_validator=None`:
`global_validator` should be a function that performs
validations that need to know about the entire form.
* `focus=True`:
the attribute (e.g., `fs.orders`) whose rendered input element
gets focus. Default value is True, meaning, focus the first
element. False means do not focus at all.
Only one of {`include`, `exclude`} may be specified.
Note that there is no option to include foreign keys. This is
deliberate. Use `include` if you really need to manually edit FKs.
If `include` is specified, fields will be rendered in the order given
in `include`. Otherwise, fields will be rendered in alphabetical
order.
Examples: given a `FieldSet` `fs` bound to a `User` instance as a
model with primary key `id` and attributes `name` and `email`, and a
relation `orders` of related Order objects, the default will be to
render `name`, `email`, and `orders`. To render the orders list as
checkboxes instead of a select, you could specify::
>>> from formalchemy.tests import FieldSet, User
>>> fs = FieldSet(User)
>>> fs.configure(options=[fs.orders.checkbox()])
To render only name and email,
>>> fs.configure(include=[fs.name, fs.email])
or
>>> fs.configure(exclude=[fs.orders])
Of course, you can include modifications to a field in the `include`
parameter, such as here, to render name and options-as-checkboxes:
>>> fs.configure(include=[fs.name, fs.orders.checkbox()])
"""
self._render_fields = OrderedDict([(field.key, field) for field in self._get_fields(pk, exclude, include, options)])
def bind(self, model=None, session=None, data=None):
"""
Return a copy of this FieldSet or Grid, bound to the given
`model`, `session`, and `data`. The parameters to this method are the
same as in the constructor.
Often you will create and `configure` a FieldSet or Grid at application
startup, then `bind` specific instances to it for actual editing or display.
"""
if not (model or session or data):
raise Exception('must specify at least one of {model, session, data}')
if not model:
if not self.model:
raise Exception('model must be specified when none is already set')
model = fields._pk(self.model) is None and type(self.model) or self.model
# copy.copy causes a stacktrace on python 2.5.2/OSX + pylons. unable to reproduce w/ simpler sample.
mr = object.__new__(self.__class__)
mr.__dict__ = dict(self.__dict__)
# two steps so bind's error checking can work
ModelRenderer.rebind(mr, model, session, data)
mr._fields = OrderedDict([(key, renderer.bind(mr)) for key, renderer in self._fields.iteritems()])
if self._render_fields:
mr._render_fields = OrderedDict([(field.key, field) for field in
[field.bind(mr) for field in self._render_fields.itervalues()]])
return mr
def copy(self, *args):
"""return a copy of the fieldset. args is a list of field names or field
objects to render in the new fieldset"""
mr = self.bind(self.model, self.session, self.data)
_fields = self._render_fields or self._fields
_new_fields = []
if args:
for field in args:
if isinstance(field, basestring):
if field in _fields:
field = _fields.get(field)
else:
raise AttributeError('%r has no field named %s' % (self, field))
assert isinstance(field, fields.AbstractField), field
field.bind(mr)
_new_fields.append(field)
mr._render_fields = OrderedDict([(field.key, field) for field in _new_fields])
return mr
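# Illustrative usage (assumes `fs` is a configured FieldSet whose model exposes
# `name` and `email` fields):
#
#     fs_small = fs.copy('name', 'email')   # new fieldset rendering only those two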
def rebind(self, model=None, session=None, data=None):
"""
Like `bind`, but acts on this instance. No return value.
Not all parameters are treated the same; specifically, what happens if they are NOT specified is different:
* if `model` is not specified, the old model is used
* if `session` is not specified, FA tries to re-guess session from the model
* if data is not specified, it is rebound to None.
"""
original_model = model
if model:
if isinstance(model, type):
try:
model = model()
except Exception, e:
model_error = str(e)
msg = ("%s appears to be a class, not an instance, but "
"FormAlchemy cannot instantiate it. "
"(Make sure all constructor parameters are "
"optional!). The error was:\n%s")
raise Exception(msg % (model, model_error))
# take object out of session, if present
try:
_obj_session = object_session(model)
except (AttributeError, UnmappedInstanceError):
pass # non-SA object; doesn't need session
else:
if _obj_session:
_obj_session.expunge(model)
else:
try:
session_ = object_session(model)
except:
# non SA class
if fields._pk(model) is None:
error = ('Mapped instances to be bound must either have '
'a primary key set or not be in a Session. When '
'creating a new object, bind the class instead '
'[i.e., bind(User), not bind(User())]')
raise Exception(error)
else:
if session_:
# for instances of mapped classes, require that the instance
# have a PK already
try:
class_mapper(type(model))
except:
pass
else:
if fields._pk(model) is None:
error = ('Mapped instances to be bound must either have '
'a primary key set or not be in a Session. When '
'creating a new object, bind the class instead '
'[i.e., bind(User), not bind(User())]')
raise Exception(error)
if (self.model and type(self.model) != type(model) and
not issubclass(model.__class__, self._original_cls)):
raise ValueError('You can only bind to another object of the same type or subclass you originally bound to (%s), not %s' % (type(self.model), type(model)))
self.model = model
self._bound_pk = fields._pk(model)
if data is None:
self.data = None
elif hasattr(data, 'getall') and hasattr(data, 'getone'):
self.data = data
else:
try:
self.data = SimpleMultiDict(data)
except:
raise Exception('unsupported data object %s. currently only dicts and Paste multidicts are supported' % data)
if session:
if not isinstance(session, Session) and not isinstance(session, ScopedSession):
raise ValueError('Invalid SQLAlchemy session object %s' % session)
self.session = session
elif model:
if '_obj_session' in locals():
# model may be a temporary object, expunged from its session -- grab the existing reference
self.session = _obj_session
else:
try:
o_session = object_session(model)
except (AttributeError, UnmappedInstanceError):
pass # non-SA object
else:
if o_session:
self.session = o_session
# if we didn't just instantiate (in which case object_session will be None),
# the session should be the same as the object_session
if self.session and model == original_model:
try:
o_session = object_session(self.model)
except (AttributeError, UnmappedInstanceError):
pass # non-SA object
else:
if o_session and self.session is not o_session:
raise Exception('You may not explicitly bind to a session when your model already belongs to a different one')
def sync(self):
"""
Copy the data passed to the constructor or to `bind` onto the corresponding attributes of the `model`.
"""
if self.data is None:
raise Exception("No data bound; cannot sync")
for field in self.render_fields.itervalues():
field.sync()
if self.session:
self.session.add(self.model)
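# Typical flow (illustrative sketch; `validate()` is assumed to be provided by
# the concrete FieldSet subclass, and `session` is the active SQLAlchemy session):
#
#     fs.rebind(user, data=params)
#     if fs.validate():
#         fs.sync()
#         session.commit()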
def _raw_fields(self):
return self._fields.values()
def _get_fields(self, pk=False, exclude=[], include=[], options=[]):
# sanity check
if include and exclude:
raise Exception('Specify at most one of include, exclude')
# help people who meant configure(include=[X]) but just wrote configure(X), resulting in pk getting the positional argument
if pk not in [True, False]:
raise ValueError('pk option must be True or False, not %s' % pk)
# verify that options that should be lists of Fields, are
for iterable in ['include', 'exclude', 'options']:
try:
L = list(eval(iterable))
except:
raise ValueError('`%s` parameter should be an iterable' % iterable)
for field in L:
if not isinstance(field, fields.AbstractField):
raise TypeError('non-AbstractField object `%s` found in `%s`' % (field, iterable))
if field not in self._fields.values():
raise ValueError('Unrecognized Field `%s` in `%s` -- did you mean to call append() first?' % (field, iterable))
# if include is given, those are the fields used. otherwise, include those not explicitly (or implicitly) excluded.
if not include:
ignore = list(exclude) # don't modify `exclude` directly to avoid surprising caller
if not pk:
ignore.extend([wrapper for wrapper in self._raw_fields() if wrapper.is_pk and not wrapper.is_collection])
ignore.extend([wrapper for wrapper in self._raw_fields() if wrapper.is_raw_foreign_key])
include = [field for field in self._raw_fields() if field not in ignore]
# in the returned list, replace any fields in `include` w/ the corresponding one in `options`, if present.
# this is a bit clunky because we want to
# 1. preserve the order given in `include`
# 2. not modify `include` (or `options`) directly; that could surprise the caller
options_dict = {} # create + update for 2.3's benefit
options_dict.update(dict([(wrapper, wrapper) for wrapper in options]))
L = []
for wrapper in include:
if wrapper in options_dict:
L.append(options_dict[wrapper])
else:
L.append(wrapper)
return L
def __getattr__(self, attrname):
try:
return self._render_fields[attrname]
except KeyError:
try:
return self._fields[attrname]
except KeyError:
raise AttributeError(attrname)
__getitem__ = __getattr__
def __setattr__(self, attrname, value):
if attrname not in ('_fields', '__dict__', 'focus', 'model', 'session', 'data') and \
(attrname in self._fields or isinstance(value, fields.AbstractField)):
raise AttributeError('Do not set field attributes manually. Use append() or configure() instead')
object.__setattr__(self, attrname, value)
def __delattr__(self, attrname):
if attrname in self._render_fields:
del self._render_fields[attrname]
elif attrname in self._fields:
raise RuntimeError("You try to delete a field but your form is not configured")
else:
raise AttributeError("field %s does not exist" % attrname)
__delitem__ = __delattr__
def render(self, **kwargs):
raise NotImplementedError()
class EditableRenderer(ModelRenderer):
default_renderers = {
fatypes.String: fields.TextFieldRenderer,
fatypes.Unicode: fields.TextFieldRenderer,
fatypes.Text: fields.TextFieldRenderer,
fatypes.Integer: fields.IntegerFieldRenderer,
fatypes.Float: fields.FloatFieldRenderer,
fatypes.Numeric: fields.FloatFieldRenderer,
fatypes.Interval: fields.IntervalFieldRenderer,
fatypes.Boolean: fields.CheckBoxFieldRenderer,
fatypes.DateTime: fields.DateTimeFieldRenderer,
fatypes.Date: fields.DateFieldRenderer,
fatypes.Time: fields.TimeFieldRenderer,
fatypes.LargeBinary: fields.FileFieldRenderer,
fatypes.List: fields.SelectFieldRenderer,
fatypes.Set: fields.SelectFieldRenderer,
'dropdown': fields.SelectFieldRenderer,
'checkbox': fields.CheckBoxSet,
'radio': fields.RadioSet,
'password': fields.PasswordFieldRenderer,
'textarea': fields.TextAreaFieldRenderer,
}
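# Illustrative sketch: a subclass can swap the renderer used for a given type by
# shadowing the mapping above (all names below are taken from that table):
#
#     class MyEditableRenderer(EditableRenderer):
#         default_renderers = dict(EditableRenderer.default_renderers)
#         default_renderers[fatypes.Text] = fields.TextAreaFieldRenderer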
|
|
# Configuration file for ipython.
# ------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
# ------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
# Default: ''
# c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# Default: True
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
# Default: []
# c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
# Default: []
# c.InteractiveShellApp.exec_lines = []
## A list of dotted module names of IPython extensions to load.
# Default: []
# c.InteractiveShellApp.extensions = []
c.InteractiveShellApp.extensions = ["grasp", "autoreload"]
## dotted module name of an IPython extension to load.
# Default: ''
# c.InteractiveShellApp.extra_extension = ''
## A file to be run
# Default: ''
# c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('asyncio', 'glut', 'gtk',
# 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2',
# 'qt4').
# Choices: any of ['asyncio', 'glut', 'gtk', 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4'] (case-insensitive) or None
# Default: None
# c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# Default: True
# c.InteractiveShellApp.hide_initial_ns = True
## If True, IPython will not add the current working directory to sys.path. When
# False, the current working directory is added to sys.path, allowing imports of
# modules defined in the current directory.
# Default: False
# c.InteractiveShellApp.ignore_cwd = False
## Configure matplotlib for interactive use with the default matplotlib backend.
# Choices: any of ['auto', 'agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg', 'notebook', 'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'] (case-insensitive) or None
# Default: None
# c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
# Default: ''
# c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# Choices: any of ['auto', 'agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg', 'notebook', 'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'] (case-insensitive) or None
# Default: None
# c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# Default: True
# c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
# Default: False
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# ------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
# ------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
# Default: '%Y-%m-%d %H:%M:%S'
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# Default: '[%(name)s]%(highlevel)s %(message)s'
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']
# Default: 30
# c.Application.log_level = 30
## Instead of starting the Application, dump configuration to stdout
# Default: False
# c.Application.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# Default: False
# c.Application.show_config_json = False
# ------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
# ------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
# Default: False
# c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# Default: False
# c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# Default: ''
# c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# Default: ''
# c.BaseIPythonApplication.ipython_dir = ''
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.BaseIPythonApplication.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.BaseIPythonApplication.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.BaseIPythonApplication.log_level = 30
## Whether to overwrite existing config files when copying
# Default: False
# c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
# Default: 'default'
# c.BaseIPythonApplication.profile = 'default'
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.BaseIPythonApplication.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.BaseIPythonApplication.show_config_json = False
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# Default: False
# c.BaseIPythonApplication.verbose_crash = False
# ------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp) configuration
# ------------------------------------------------------------------------------
## Execute the given command string.
# See also: InteractiveShellApp.code_to_run
# c.TerminalIPythonApp.code_to_run = ''
## Whether to install the default config files into the profile dir.
# See also: BaseIPythonApplication.copy_config_files
# c.TerminalIPythonApp.copy_config_files = False
c.TerminalIPythonApp.display_banner = False
## Run the file referenced by the PYTHONSTARTUP environment
# See also: InteractiveShellApp.exec_PYTHONSTARTUP
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
# See also: InteractiveShellApp.exec_files
# c.TerminalIPythonApp.exec_files = []
## lines of code to run at IPython startup.
# See also: InteractiveShellApp.exec_lines
c.TerminalIPythonApp.exec_lines = ["%autoreload 2"]
## A list of dotted module names of IPython extensions to load.
# See also: InteractiveShellApp.extensions
# c.TerminalIPythonApp.extensions = []
## Path to an extra config file to load.
# See also: BaseIPythonApplication.extra_config_file
# c.TerminalIPythonApp.extra_config_file = ''
## dotted module name of an IPython extension to load.
# See also: InteractiveShellApp.extra_extension
# c.TerminalIPythonApp.extra_extension = ''
## A file to be run
# See also: InteractiveShellApp.file_to_run
# c.TerminalIPythonApp.file_to_run = ''
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# Default: False
# c.TerminalIPythonApp.force_interact = False
## Enable GUI event loop integration with any of ('asyncio', 'glut', 'gtk',
# 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2',
# 'qt4').
# See also: InteractiveShellApp.gui
# c.TerminalIPythonApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.)
# See also: InteractiveShellApp.hide_initial_ns
# c.TerminalIPythonApp.hide_initial_ns = True
## If True, IPython will not add the current working directory to sys.path.
# See also: InteractiveShellApp.ignore_cwd
# c.TerminalIPythonApp.ignore_cwd = False
## Class to use to instantiate the TerminalInteractiveShell object. Useful for
# custom Frontends
# Default: 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
# c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
##
# See also: BaseIPythonApplication.ipython_dir
# c.TerminalIPythonApp.ipython_dir = ''
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.TerminalIPythonApp.log_level = 30
## Configure matplotlib for interactive use with
# See also: InteractiveShellApp.matplotlib
# c.TerminalIPythonApp.matplotlib = None
## Run the module as a script.
# See also: InteractiveShellApp.module_to_run
# c.TerminalIPythonApp.module_to_run = ''
## Whether to overwrite existing config files when copying
# See also: BaseIPythonApplication.overwrite
# c.TerminalIPythonApp.overwrite = False
## The IPython profile to use.
# See also: BaseIPythonApplication.profile
# c.TerminalIPythonApp.profile = 'default'
## Pre-load matplotlib and numpy for interactive use,
# See also: InteractiveShellApp.pylab
# c.TerminalIPythonApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc.
# See also: InteractiveShellApp.pylab_import_all
# c.TerminalIPythonApp.pylab_import_all = True
## Start IPython quickly by skipping the loading of config files.
# Default: False
# c.TerminalIPythonApp.quick = False
## Reraise exceptions encountered loading IPython extensions?
# See also: InteractiveShellApp.reraise_ipython_extension_failures
# c.TerminalIPythonApp.reraise_ipython_extension_failures = False
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.TerminalIPythonApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.TerminalIPythonApp.show_config_json = False
## Create a massive crash report when IPython encounters what may be an
# See also: BaseIPythonApplication.verbose_crash
# c.TerminalIPythonApp.verbose_crash = False
# ------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
# ------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying which
# nodes should be run interactively (displaying output from expressions).
# Choices: any of ['all', 'last', 'last_expr', 'none', 'last_expr_or_assign']
# Default: 'last_expr'
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# Default: []
# c.InteractiveShell.ast_transformers = []
## Automatically run await statement in the top level repl.
# Default: True
# c.InteractiveShell.autoawait = True
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# Choices: any of [0, 1, 2]
# Default: 0
# c.InteractiveShell.autocall = 0
## Autoindent IPython code entered interactively.
# Default: True
# c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
# Default: True
# c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
# Default: "Python 3.7.3 (default, Dec 20 2019, 17:49:37) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.18.1 -- An enhanced Interactive Python. Type '?' for help.\n"
# c.InteractiveShell.banner1 = "Python 3.7.3 (default, Dec 20 2019, 17:49:37) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.18.1 -- An enhanced Interactive Python. Type '?' for help.\n"
## The part of the banner to be printed after the profile
# Default: ''
# c.InteractiveShell.banner2 = ''
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 3 (if you provide a value
# less than 3, it is reset to 0 and a warning is issued). This limit is defined
# because otherwise you'll spend more time re-flushing a too small cache than
# working
# Default: 1000
# c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# Default: True
# c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
# Choices: any of ['Neutral', 'NoColor', 'LightBG', 'Linux'] (case-insensitive)
# Default: 'Neutral'
# c.InteractiveShell.colors = 'Neutral'
# Default: False
# c.InteractiveShell.debug = False
## Don't call post-execute functions that have failed in the past.
# Default: False
# c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# Default: False
# c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
# Default: False
# c.InteractiveShell.enable_html_pager = False
## Total length of command history
# Default: 10000
# c.InteractiveShell.history_length = 10000
## The number of saved history entries to be loaded into the history buffer at
# startup.
# Default: 1000
# c.InteractiveShell.history_load_length = 1000
# Default: ''
# c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# Default: ''
# c.InteractiveShell.logappend = ''
## The name of the logfile to use.
# Default: ''
# c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# Default: False
# c.InteractiveShell.logstart = False
## Select the loop runner that will be used to execute top-level asynchronous
# code
# Default: 'IPython.core.interactiveshell._asyncio_runner'
# c.InteractiveShell.loop_runner = 'IPython.core.interactiveshell._asyncio_runner'
# Choices: any of [0, 1, 2]
# Default: 0
# c.InteractiveShell.object_info_string_level = 0
c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# Default: 'In [\\#]: '
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# Default: ' .\\D.: '
# c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# Default: 'Out[\\#]: '
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# Default: True
# c.InteractiveShell.prompts_pad_left = True
# Default: False
# c.InteractiveShell.quiet = False
# Default: '\n'
# c.InteractiveShell.separate_in = '\n'
# Default: ''
# c.InteractiveShell.separate_out = ''
# Default: ''
# c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
# Default: True
# c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
# Default: False
# c.InteractiveShell.sphinxify_docstring = False
# Default: True
# c.InteractiveShell.wildcards_case_sensitive = True
## Switch modes for the IPython exception handlers.
# Choices: any of ['Context', 'Plain', 'Verbose', 'Minimal'] (case-insensitive)
# Default: 'Context'
# c.InteractiveShell.xmode = 'Context'
# ------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
# ------------------------------------------------------------------------------
##
# See also: InteractiveShell.ast_node_interactivity
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
##
# See also: InteractiveShell.ast_transformers
# c.TerminalInteractiveShell.ast_transformers = []
##
# See also: InteractiveShell.autoawait
# c.TerminalInteractiveShell.autoawait = True
##
# See also: InteractiveShell.autocall
# c.TerminalInteractiveShell.autocall = 0
## Autoformatter to reformat Terminal code. Can be `'black'` or `None`
# Default: None
# c.TerminalInteractiveShell.autoformatter = None
##
# See also: InteractiveShell.autoindent
# c.TerminalInteractiveShell.autoindent = True
##
# See also: InteractiveShell.automagic
# c.TerminalInteractiveShell.automagic = True
## The part of the banner to be printed before the profile
# See also: InteractiveShell.banner1
# c.TerminalInteractiveShell.banner1 = "Python 3.7.3 (default, Dec 20 2019, 17:49:37) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.18.1 -- An enhanced Interactive Python. Type '?' for help.\n"
## The part of the banner to be printed after the profile
# See also: InteractiveShell.banner2
# c.TerminalInteractiveShell.banner2 = ''
##
# See also: InteractiveShell.cache_size
# c.TerminalInteractiveShell.cache_size = 1000
##
# See also: InteractiveShell.color_info
# c.TerminalInteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
# See also: InteractiveShell.colors
# c.TerminalInteractiveShell.colors = 'Neutral'
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# Default: True
# c.TerminalInteractiveShell.confirm_exit = True
# See also: InteractiveShell.debug
# c.TerminalInteractiveShell.debug = False
## Don't call post-execute functions that have failed in the past.
# See also: InteractiveShell.disable_failing_post_execute
# c.TerminalInteractiveShell.disable_failing_post_execute = False
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
# Choices: any of ['column', 'multicolumn', 'readlinelike']
# Default: 'multicolumn'
# c.TerminalInteractiveShell.display_completions = 'multicolumn'
## If True, anything that would be passed to the pager
# See also: InteractiveShell.display_page
# c.TerminalInteractiveShell.display_page = False
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
# Default: 'emacs'
c.TerminalInteractiveShell.editing_mode = "vi"
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
# Default: '/Users/benmezger/.bin/editor'
# c.TerminalInteractiveShell.editor = '/Users/benmezger/.bin/editor'
## Allows enabling/disabling the prompt toolkit history search
# Default: True
# c.TerminalInteractiveShell.enable_history_search = True
##
# See also: InteractiveShell.enable_html_pager
# c.TerminalInteractiveShell.enable_html_pager = False
## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
# Default: False
# c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
## Provide an alternative handler to be called when the user presses Return. This
# is an advanced option intended for debugging, which may be changed or removed
# in later releases.
# Default: None
# c.TerminalInteractiveShell.handle_return = None
## Highlight matching brackets.
# Default: True
# c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name or class of a Pygments style to use for syntax highlighting. To see
# available styles, run `pygmentize -L styles`.
# Default: traitlets.Undefined
# c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
## Override highlighting format for specific tokens
# Default: {}
# c.TerminalInteractiveShell.highlighting_style_overrides = {}
## Total length of command history
# See also: InteractiveShell.history_length
# c.TerminalInteractiveShell.history_length = 10000
##
# See also: InteractiveShell.history_load_length
# c.TerminalInteractiveShell.history_load_length = 1000
# See also: InteractiveShell.ipython_dir
# c.TerminalInteractiveShell.ipython_dir = ''
##
# See also: InteractiveShell.logappend
# c.TerminalInteractiveShell.logappend = ''
##
# See also: InteractiveShell.logfile
# c.TerminalInteractiveShell.logfile = ''
##
# See also: InteractiveShell.logstart
# c.TerminalInteractiveShell.logstart = False
## Select the loop runner that will be used to execute top-level asynchronous
# code
# See also: InteractiveShell.loop_runner
# c.TerminalInteractiveShell.loop_runner = 'IPython.core.interactiveshell._asyncio_runner'
# Default: {}
# c.TerminalInteractiveShell.mime_renderers = {}
## Enable mouse support in the prompt (Note: prevents selecting text with the
# mouse)
# Default: False
# c.TerminalInteractiveShell.mouse_support = False
# See also: InteractiveShell.object_info_string_level
# c.TerminalInteractiveShell.object_info_string_level = 0
##
# See also: InteractiveShell.pdb
# c.TerminalInteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# See also: InteractiveShell.prompt_in1
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# See also: InteractiveShell.prompt_in2
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
## Display the current vi mode (when using vi editing mode).
# Default: True
c.TerminalInteractiveShell.prompt_includes_vi_mode = True
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# See also: InteractiveShell.prompt_out
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
## Class used to generate Prompt token for prompt_toolkit
# Default: 'IPython.terminal.prompts.Prompts'
# c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# See also: InteractiveShell.prompts_pad_left
# c.TerminalInteractiveShell.prompts_pad_left = True
# See also: InteractiveShell.quiet
# c.TerminalInteractiveShell.quiet = False
# See also: InteractiveShell.separate_in
# c.TerminalInteractiveShell.separate_in = '\n'
# See also: InteractiveShell.separate_out
# c.TerminalInteractiveShell.separate_out = ''
# See also: InteractiveShell.separate_out2
# c.TerminalInteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
# See also: InteractiveShell.show_rewritten_input
# c.TerminalInteractiveShell.show_rewritten_input = True
## Use `raw_input` for the REPL, without completion and prompt colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
# Default: False
# c.TerminalInteractiveShell.simple_prompt = False
## Number of lines at the bottom of the screen to reserve for the tab completion
# menu, search history, etc.; the height of these menus will be at most this
# value. Increase it if you prefer long and skinny menus, decrease it for short
# and wide ones.
# Default: 6
# c.TerminalInteractiveShell.space_for_menu = 6
##
# See also: InteractiveShell.sphinxify_docstring
# c.TerminalInteractiveShell.sphinxify_docstring = False
## Automatically set the terminal title
# Default: True
# c.TerminalInteractiveShell.term_title = True
## Customize the terminal title format. This is a python format string.
# Available substitutions are: {cwd}.
# Default: 'IPython: {cwd}'
# c.TerminalInteractiveShell.term_title_format = 'IPython: {cwd}'
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
# Default: False
# c.TerminalInteractiveShell.true_color = False
# See also: InteractiveShell.wildcards_case_sensitive
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
## Switch modes for the IPython exception handlers.
# See also: InteractiveShell.xmode
# c.TerminalInteractiveShell.xmode = 'Context'
# ------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
# ------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# Default: {}
# c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# Default: True
# c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colons at both
# ends but not the back ticks), to avoid creating a history file.
# Default: ''
# c.HistoryAccessor.hist_file = ''
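## For example, to keep the history purely in memory (the `:memory:` value
# described above), uncomment:
# c.HistoryAccessor.hist_file = ':memory:'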
# ------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
# ------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Options for configuring the SQLite connection
# See also: HistoryAccessor.connection_options
# c.HistoryManager.connection_options = {}
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# Default: 0
# c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
# Default: False
# c.HistoryManager.db_log_output = False
## enable the SQLite history
# See also: HistoryAccessor.enabled
# c.HistoryManager.enabled = True
## Path to file to use for SQLite history database.
# See also: HistoryAccessor.hist_file
# c.HistoryManager.hist_file = ''
# ------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
# ------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
# Default: ''
# c.ProfileDir.location = ''
# ------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
# ------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format an given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
# Default: {}
# c.BaseFormatter.deferred_printers = {}
# Default: True
# c.BaseFormatter.enabled = True
# Default: {}
# c.BaseFormatter.singleton_printers = {}
# Default: {}
# c.BaseFormatter.type_printers = {}
# ------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
# ------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# See also: BaseFormatter.deferred_printers
# c.PlainTextFormatter.deferred_printers = {}
# Default: ''
# c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# Default: 1000
# c.PlainTextFormatter.max_seq_length = 1000
# Default: 79
# c.PlainTextFormatter.max_width = 79
# Default: '\n'
# c.PlainTextFormatter.newline = '\n'
# Default: True
# c.PlainTextFormatter.pprint = True
# See also: BaseFormatter.singleton_printers
# c.PlainTextFormatter.singleton_printers = {}
# See also: BaseFormatter.type_printers
# c.PlainTextFormatter.type_printers = {}
# Default: False
# c.PlainTextFormatter.verbose = False
# ------------------------------------------------------------------------------
# Completer(Configurable) configuration
# ------------------------------------------------------------------------------
## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex
# commands, unicode names, and expanding unicode characters back to latex
# commands.
# Default: True
# c.Completer.backslash_combining_completions = True
## Enable debug for the Completer. Mostly print extra information for
# experimental jedi integration.
# Default: False
# c.Completer.debug = False
## Activate greedy completion. PENDING DEPRECATION: this is now mostly taken
# care of by Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# Default: False
# c.Completer.greedy = False
## Experimental: restrict time (in milliseconds) during which Jedi can compute
# types. Set to 0 to stop computing types. Non-zero values lower than 100ms may
# hurt performance by preventing Jedi from building its cache.
# Default: 400
# c.Completer.jedi_compute_type_timeout = 400
## Experimental: Use Jedi to generate autocompletions. Defaults to True if jedi
# is installed.
# Default: True
# c.Completer.use_jedi = True
# ------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
# ------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex
# commands, unicode names, and expanding unicode characters back to latex
# commands.
# See also: Completer.backslash_combining_completions
# c.IPCompleter.backslash_combining_completions = True
## Enable debug for the Completer. Mostly print extra information for
# experimental jedi integration.
# See also: Completer.debug
# c.IPCompleter.debug = False
## Activate greedy completion
# See also: Completer.greedy
# c.IPCompleter.greedy = False
## Experimental: restrict time (in milliseconds) during which Jedi can compute
# types.
# See also: Completer.jedi_compute_type_timeout
# c.IPCompleter.jedi_compute_type_timeout = 400
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# Default: False
# c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# Default: True
# c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# Choices: any of [0, 1, 2]
# Default: 2
# c.IPCompleter.omit__names = 2
## Experimental: Use Jedi to generate autocompletions. Defaults to True if jedi
# is installed.
# See also: Completer.use_jedi
# c.IPCompleter.use_jedi = True
# ------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
# ------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# Default: []
# c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# Default: {}
# c.ScriptMagics.script_paths = {}
# ------------------------------------------------------------------------------
# LoggingMagics(Magics) configuration
# ------------------------------------------------------------------------------
## Magics related to all logging machinery.
## Suppress output of log state when logging is enabled
# Default: False
# c.LoggingMagics.quiet = False
# ------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
# ------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
# Default: False
# c.StoreMagics.autorestore = False
|
|
from __future__ import absolute_import
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
import warnings
import time, json
from collections import deque
from .utils.generic_utils import Progbar
from .utils.plotting_utils import PlotGenerator
class CallbackList(object):
def __init__(self, callbacks=[], queue_length=10):
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def _set_params(self, params):
for callback in self.callbacks:
callback._set_params(params)
def _set_model(self, model):
for callback in self.callbacks:
callback._set_model(model)
def on_epoch_begin(self, epoch, logs={}):
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs={}):
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs={}):
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch \
and delta_t_median > 0.1:
warnings.warn('Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs={}):
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch \
and delta_t_median > 0.1:
warnings.warn('Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
def on_train_begin(self, logs={}):
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs={}):
for callback in self.callbacks:
callback.on_train_end(logs)
class Callback(object):
def __init__(self):
pass
def _set_params(self, params):
self.params = params
def _set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs={}):
pass
def on_epoch_end(self, epoch, logs={}):
pass
def on_batch_begin(self, batch, logs={}):
pass
def on_batch_end(self, batch, logs={}):
pass
def on_train_begin(self, logs={}):
pass
def on_train_end(self, logs={}):
pass
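# Illustrative sketch (not part of the original API): a custom callback only
# needs to subclass Callback and override the hooks it cares about. The 'loss'
# key used here follows the convention established by BaseLogger below; the
# class name is hypothetical.
class LossPrinter(Callback):
    def on_batch_end(self, batch, logs={}):
        print('batch %d: loss=%s' % (batch, logs.get('loss')))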
class BaseLogger(Callback):
def on_train_begin(self, logs={}):
self.verbose = self.params['verbose']
def on_epoch_begin(self, epoch, logs={}):
if self.verbose:
print('Epoch %d' % epoch)
self.progbar = Progbar(target=self.params['nb_sample'], \
verbose=self.verbose)
self.current = 0
self.tot_loss = 0.
self.tot_acc = 0.
def on_batch_begin(self, batch, logs={}):
if self.current < self.params['nb_sample']:
self.log_values = []
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.current += batch_size
loss = logs.get('loss')
self.log_values.append(('loss', loss))
self.tot_loss += loss * batch_size
if self.params['show_accuracy']:
accuracy = logs.get('accuracy')
self.log_values.append(('acc.', accuracy))
self.tot_acc += accuracy * batch_size
# skip progbar update for the last batch; will be handled by on_epoch_end
if self.verbose and self.current < self.params['nb_sample']:
self.progbar.update(self.current, self.log_values)
def on_epoch_end(self, epoch, logs={}):
self.log_values.append(('loss', self.tot_loss / self.current))
if self.params['show_accuracy']:
self.log_values.append(('acc.', self.tot_acc / self.current))
if self.params['do_validation']:
val_loss = logs.get('val_loss')
self.log_values.append(('val. loss', val_loss))
if self.params['show_accuracy']:
val_acc = logs.get('val_accuracy')
self.log_values.append(('val. acc.', val_acc))
self.progbar.update(self.current, self.log_values)
class History(Callback):
def on_train_begin(self, logs={}):
self.epoch = []
self.loss = []
if self.params['show_accuracy']:
self.accuracy = []
if self.params['do_validation']:
self.validation_loss = []
if self.params['show_accuracy']:
self.validation_accuracy = []
def on_epoch_begin(self, epoch, logs={}):
self.seen = 0
self.tot_loss = 0.
self.tot_accuracy = 0.
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.seen += batch_size
self.tot_loss += logs.get('loss', 0.) * batch_size
if self.params['show_accuracy']:
self.tot_accuracy += logs.get('accuracy', 0.) * batch_size
def on_epoch_end(self, epoch, logs={}):
val_loss = logs.get('val_loss')
val_acc = logs.get('val_accuracy')
self.epoch.append(epoch)
self.loss.append(self.tot_loss / self.seen)
if self.params['show_accuracy']:
self.accuracy.append(self.tot_accuracy / self.seen)
if self.params['do_validation']:
self.validation_loss.append(val_loss)
if self.params['show_accuracy']:
self.validation_accuracy.append(val_acc)
class Plotter(History):
# see PlotGenerator.__init__() for a description of the parameters
def __init__(self,
save_to_filepath=None, show_plot_window=True,
linestyles=None, linestyles_first_epoch=None,
show_regressions=True,
poly_forward_perc=0.1, poly_backward_perc=0.2,
poly_n_forward_min=5, poly_n_backward_min=10,
poly_degree=1):
super(Plotter, self).__init__()
pgen = PlotGenerator(linestyles=linestyles,
linestyles_first_epoch=linestyles_first_epoch,
show_regressions=show_regressions,
poly_forward_perc=poly_forward_perc,
poly_backward_perc=poly_backward_perc,
poly_n_forward_min=poly_n_forward_min,
poly_n_backward_min=poly_n_backward_min,
poly_degree=poly_degree,
show_plot_window=show_plot_window,
save_to_filepath=save_to_filepath)
self.plot_generator = pgen
def on_epoch_end(self, epoch, logs={}):
super(Plotter, self).on_epoch_end(epoch, logs)
dv = self.params['do_validation']
sa = self.params['show_accuracy']
train_loss = self.loss
val_loss = self.validation_loss if dv else []
train_acc = self.accuracy if sa else []
val_acc = self.validation_accuracy if dv and sa else []
self.plot_generator.update(epoch, train_loss, train_acc,
val_loss, val_acc)
class ModelCheckpoint(Callback):
def __init__(self, filepath, verbose=0, save_best_only=False):
        super(ModelCheckpoint, self).__init__()
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.loss = []
self.best_loss = np.Inf
self.val_loss = []
self.best_val_loss = np.Inf
def on_epoch_end(self, epoch, logs={}):
if self.save_best_only and self.params['do_validation']:
cur_val_loss = logs.get('val_loss')
self.val_loss.append(cur_val_loss)
if cur_val_loss < self.best_val_loss:
if self.verbose > 0:
print("Epoch %05d: validation loss improved from %0.5f to %0.5f, saving model to %s"
% (epoch, self.best_val_loss, cur_val_loss, self.filepath))
self.best_val_loss = cur_val_loss
self.model.save_weights(self.filepath, overwrite=True)
else:
if self.verbose > 0:
print("Epoch %05d: validation loss did not improve" % (epoch))
elif self.save_best_only and not self.params['do_validation']:
warnings.warn("Can save best model only with validation data, skipping", RuntimeWarning)
elif not self.save_best_only:
if self.verbose > 0:
print("Epoch %05d: saving model to %s" % (epoch, self.filepath))
self.model.save_weights(self.filepath, overwrite=True)
class EarlyStopping(Callback):
def __init__(self, patience=0, verbose=0):
        super(EarlyStopping, self).__init__()
self.patience = patience
self.verbose = verbose
self.best_val_loss = np.Inf
self.wait = 0
def on_epoch_end(self, epoch, logs={}):
if not self.params['do_validation']:
warnings.warn("Early stopping requires validation data!", RuntimeWarning)
cur_val_loss = logs.get('val_loss')
if cur_val_loss < self.best_val_loss:
self.best_val_loss = cur_val_loss
self.wait = 0
else:
if self.wait >= self.patience:
if self.verbose > 0:
print("Epoch %05d: early stopping" % (epoch))
self.model.stop_training = True
self.wait += 1
class RemoteMonitor(Callback):
def __init__(self, root='http://localhost:9000'):
self.root = root
self.seen = 0
self.tot_loss = 0.
self.tot_accuracy = 0.
def on_epoch_begin(self, epoch, logs={}):
self.seen = 0
self.tot_loss = 0.
self.tot_accuracy = 0.
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.seen += batch_size
self.tot_loss += logs.get('loss', 0.) * batch_size
if self.params['show_accuracy']:
self.tot_accuracy += logs.get('accuracy', 0.) * batch_size
def on_epoch_end(self, epoch, logs={}):
import requests
logs['epoch'] = epoch
logs['loss'] = self.tot_loss / self.seen
r = requests.post(self.root + '/publish/epoch/end/', {'data':json.dumps(logs)})
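# Illustrative sketch (not part of the original module): how a training loop is
# expected to drive a CallbackList. The `model` argument is an assumption here;
# it is taken to expose train_on_batch() and a stop_training flag, in the style
# of Keras models, and `batches` is any iterable of (X, y) pairs.
def _example_training_loop(model, batches, nb_sample, nb_epoch=10):
    callbacks = CallbackList([BaseLogger(), History()])
    callbacks._set_model(model)
    callbacks._set_params({'nb_sample': nb_sample, 'verbose': 1,
                           'show_accuracy': False, 'do_validation': False})
    callbacks.on_train_begin()
    for epoch in range(nb_epoch):
        callbacks.on_epoch_begin(epoch)
        for batch_index, (X, y) in enumerate(batches):
            callbacks.on_batch_begin(batch_index, {'size': len(X)})
            loss = model.train_on_batch(X, y)
            callbacks.on_batch_end(batch_index, {'size': len(X), 'loss': loss})
        callbacks.on_epoch_end(epoch)
        if getattr(model, 'stop_training', False):
            break
    callbacks.on_train_end()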
|
|
# -*- coding: utf-8 -*-
import time
import urlparse
import datetime
import pytz
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import make_aware, now
from django.contrib.auth.models import User
from django_orm.postgresql import hstore
from .utils.pypi import DEFAULT_SERVER, CheeseShop, normalize_name
PROVIDES = ('pypi',)
class Package(models.Model):
PYPI = 'pypi'
PROVIDER_CHOICES = (
(PYPI, 'PyPi'),
)
name = models.CharField(_('name'), max_length=255, unique=True)
normalized_name = models.CharField(_('Normalized Name'), max_length=255,
unique=True)
url = models.URLField(_('url'))
provider = models.CharField(_('provider'), max_length=255,
choices=PROVIDER_CHOICES, default=PYPI)
initial_sync_done = models.BooleanField(default=False)
@classmethod
def create_with_provider_url(cls, name, provider='pypi', url=None):
if url is None:
url = urlparse.urljoin(DEFAULT_SERVER, name)
pkg = cls(name=name, url=url, provider=provider)
pkg.save()
return pkg
def sync_versions(self):
if self.initial_sync_done:
return
client = CheeseShop()
versions = client.get_package_versions(self.name)
for version in versions:
urls = client.get_release_urls(self.name, version)
if urls:
url = urls[0]
utime = time.mktime(url['upload_time'].timetuple())
release_date = make_aware(
datetime.datetime.fromtimestamp(utime),
pytz.UTC)
PackageVersion.objects.get_or_create(
package=self,
version=version,
release_date=release_date)
self.initial_sync_done = True
self.save()
def save(self, *args, **kwargs):
if not self.normalized_name:
self.normalized_name = normalize_name(self.name)
super(Package, self).save(*args, **kwargs)
class Meta:
verbose_name = _('package')
verbose_name_plural = _('packages')
unique_together = ('name', 'provider')
def __unicode__(self):
return self.name
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.name)
class PackageVersion(models.Model):
package = models.ForeignKey(Package, verbose_name=_('package'),
related_name='versions')
version = models.CharField(_('version'), max_length=255)
release_date = models.DateTimeField(_('release date'))
class Meta:
verbose_name = _('package version')
verbose_name_plural = _('package versions')
unique_together = ('package', 'version')
def __unicode__(self):
return '{}{}'.format(self.package.name, self.version)
class ProjectMember(models.Model):
OWNER = 0
MEMBER = 1
STATE_CHOICES = (
(OWNER, _('Owner')),
(MEMBER, _('Member'))
)
project = models.ForeignKey('Project', verbose_name=_('project'))
user = models.ForeignKey(User, verbose_name=_('user'))
state = models.IntegerField(_('state'), choices=STATE_CHOICES)
mail = models.EmailField(_('Email'), max_length=255, blank=True)
jabber = models.CharField(_('Jabber'), max_length=255, blank=True)
class Meta:
verbose_name = _('project member')
verbose_name_plural = _('project members')
unique_together = ('project', 'user')
class Project(models.Model):
name = models.CharField(_('name'), max_length=255)
slug = models.SlugField(_('slug'), unique=True)
members = models.ManyToManyField(User, through=ProjectMember,
verbose_name=_('members'))
class Meta:
verbose_name = _('project')
verbose_name_plural = _('projects')
@models.permalink
def get_absolute_url(self):
return 'folivora_project_detail', (), {'slug': self.slug}
def create_logentry(self, type, action, user=None, **kwargs):
Log.objects.create(project=self, type=type,
action=action, data=kwargs, user=user)
@property
def requirements(self):
query = ProjectDependency.objects.filter(project=self) \
.select_related('package') \
.order_by('package__name')
return "\n".join([d.dependency_string for d in query])
@property
def requirement_dict(self):
query = ProjectDependency.objects.filter(project=self) \
.select_related('package')
return dict((d.package.name, d.version) for d in query)
def process_changes(self, user, remove=None, change=None, add=None):
log_entries = []
remove = remove if remove else []
change = change if change else []
add = add if add else []
for package_id, version in add:
log_entries.append(Log(type='project_dependency', action='add',
project_id=self.pk, package_id=package_id,
user=user, data={'version': version}))
for package_id, version in remove:
log_entries.append(Log(type='project_dependency', action='remove',
project_id=self.pk, package_id=package_id,
user=user, data={'version': version}))
for package_id, old_version, new_version in change:
log_entries.append(Log(type='project_dependency', action='update',
project_id=self.pk, package_id=package_id,
user=user, data={'version': new_version,
'old_version': old_version}))
Log.objects.bulk_create(log_entries)
from .tasks import sync_project
# give the request time to finish before syncing
sync_project.apply_async(args=[self.pk], countdown=1)
@property
def owners(self):
return self.members.filter(projectmember__state=ProjectMember.OWNER)
class ProjectDependency(models.Model):
project = models.ForeignKey(Project, verbose_name=_('project'),
related_name='dependencies')
package = models.ForeignKey(Package, verbose_name=_('package'))
version = models.CharField(_('version'), max_length=255)
update = models.ForeignKey(PackageVersion, verbose_name=_('update'),
null=True, blank=True, default=None)
class Meta:
verbose_name = _('project dependency')
verbose_name_plural = _('project dependencies')
unique_together = ('project', 'package')
@property
def dependency_string(self):
return u"%s==%s" % (self.package.name, self.version)
@property
def update_available(self):
return self.update_id is not None
@classmethod
def process_formset(cls, formset, original_data, user):
remove = []
change = []
for instance in formset.deleted_objects:
remove.append((instance.package.id, instance.version))
for instance, d in formset.changed_objects:
existing = original_data[instance.pk]
change.append((instance.package.id,
existing.version,
instance.version))
formset.instance.process_changes(user, remove, change)
class Log(models.Model):
project = models.ForeignKey(Project, verbose_name=_('project'))
package = models.ForeignKey(Package, verbose_name=_('package'),
null=True, blank=True, default=None)
user = models.ForeignKey(User, verbose_name=_('user'), null=True)
when = models.DateTimeField(_('when'), default=now)
action = models.CharField(_('action'), max_length=255)
type = models.CharField(_('type'), max_length=255)
data = hstore.DictionaryField()
objects = hstore.HStoreManager()
class Meta:
verbose_name = _('log')
verbose_name_plural = _('logs')
def __unicode__(self):
return '{}.{}, package: {}, project: {}'.format(self.type, self.action,
self.package_id,
self.project_id)
@property
def template(self):
return 'folivora/notifications/{}.{}.html'.format(self.type,
self.action)
class SyncState(models.Model):
"""Generic model to store syncronization states."""
CHANGELOG = 'changelog'
    TYPE_CHOICES = (
        (CHANGELOG, _('Changelog')),
    )
    STATE_DOWN = 'down'
    STATE_RUNNING = 'running'
    STATE_CHOICES = (
        (STATE_DOWN, _('Down')),
        (STATE_RUNNING, _('Running')),
    )
type = models.CharField(max_length=255, choices=TYPE_CHOICES, unique=True)
state = models.CharField(max_length=255, choices=STATE_CHOICES,
default=STATE_RUNNING)
last_sync = models.DateTimeField(_('Last Sync'), default=now)
class UserProfile(models.Model):
user = models.OneToOneField(User)
language = models.CharField(_('Language'), max_length=255,
choices=settings.LANGUAGES, blank=True)
timezone = models.CharField(_('Timezone'), max_length=255, default='UTC')
jabber = models.CharField(_('JID'), max_length=255, blank=True)
def get_absolute_url(self):
return reverse('folivora_profile_edit')
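# Illustrative usage sketch (hypothetical shell session, not part of the models;
# assumes a configured database and network access to PyPI):
#
#   pkg = Package.create_with_provider_url('Django')
#   pkg.sync_versions()                      # pulls the release list once
#   project = Project.objects.create(name='demo', slug='demo')
#   ProjectDependency.objects.create(project=project, package=pkg, version='1.4')
#   print project.requirements               # -> "Django==1.4"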
|
|
import os, sys, json
import psycopg2
from flask import abort, jsonify, request
from flask_cors import CORS
from psycopg2.extras import RealDictCursor
#https://tecadmin.net/install-python-3-7-on-ubuntu-linuxmint/
#https://bitlaunch.io/blog/create-a-bitcoin-node-with-ubuntu/
#nohup pythonScript.py
conn_string = "host='localhost' dbname='swift' user='postgres' password='Guatemala1'"
NETWORK = 'mainnet'
#################################### PGADMIN ################################################
#https://phoenixnap.com/kb/how-to-connect-postgresql-database-command-line
def recordAddress(address, amount):
    conn = psycopg2.connect(conn_string)
    cursor = conn.cursor()
    # Parameterized query avoids SQL injection via user-supplied values.
    q = 'insert into public."addresses" (address, status, datetime, amount) values (%s, %s, now(), %s)'
    print(q)
    cursor.execute(q, (str(address), '1', str(amount)))
    conn.commit()
    conn.close()
    return addressStatusRequest(address)
def getMessages(toMsg):
    conn = psycopg2.connect(conn_string)
    cursor = conn.cursor(cursor_factory=RealDictCursor)
    cursor.execute('select msg from public."offlineChat" where tomsg = %s', (toMsg,))
    rows = cursor.fetchall()
    # fetch before closing the connection, otherwise the cursor is unusable
    conn.close()
    return json.dumps(rows, indent=2)
def grabaMensaje(data):
    msg = str(data)  # store the full payload as the message body
    tomsg = str(data['tomsg'])
    sent = str(data['sent'])
    datetime = str(data['datetime'])
    conn = psycopg2.connect(conn_string)
    cursor = conn.cursor()
    print(msg + " " + tomsg + " " + sent + " " + datetime)
    # Parameterized query removes the need for manual quote escaping.
    q = 'insert into public."offlineChat" (msg, tomsg, sent, datetime) values (%s, %s, %s, %s)'
    cursor.execute(q, (msg, tomsg, sent, datetime))
    conn.commit()
    conn.close()
def borrarMensaje(tomsg):
    conn = psycopg2.connect(conn_string)
    cursor = conn.cursor()
    q = 'delete from public."offlineChat" where tomsg = %s'
    cursor.execute(q, (tomsg,))
    conn.commit()
    conn.close()
########################################## BTC ##########################################################
from bitcoin import *
from pywallet import wallet
def keyWith3(my_public_key1):
#my_public_key1 = privtopub(pKey)
#print('Public Key 1: ' + my_public_key1)
my_multi_sig = mk_multisig_script(my_public_key1, my_public_key1, my_public_key1, 2,3)
my_multi_address = scriptaddr(my_multi_sig)
#print('Multi-Address: ' + my_multi_address)
return my_multi_address
#https://github.com/ranaroussi/pywallet
def wall3():
# generate 12 word mnemonic seed
seed = wallet.generate_mnemonic()
# create bitcoin wallet
w = wallet.create_wallet(network='BTC',seed=None,children=0)#TESTNET
return w
def createKey(WALLET_PUBKEY,childNumber):
    user_addr = wallet.create_address(network='BTC', xpub=WALLET_PUBKEY, child=int(childNumber))  # create new TESTNET addresses
#rand_addr = wallet.create_address(network="btctest", xpub=WALLET_PUBKEY)
json = str(user_addr)
#print(json)
return json
def newKeyWith3(my_public_key1):
my_public_key2 = privtopub(random_key())
print(my_public_key1)
print(my_public_key2)
#print(my_public_key3)
my_multi_sig = mk_multisig_script(my_public_key1, my_public_key1, my_public_key2, 2,3)
my_multi_address = scriptaddr(my_multi_sig)
#print('Multi-Address: ' + my_multi_address)
return my_multi_address
#https://community.microstrategy.com/s/article/Sample-REST-API-in-Python-Authentication?language=en_US
##################################### SERVICES ################################################################
route = r"/api/mobil/"
pwdRestful = 'pwd'#'b47fcec016832713da3ef91ff64d7d42d3649c830e0964eab51b99ebdb0f88f3037dd43c8a71cad25d3e497be6f355f43b510e8ac59a46389f58eb7fat7fdi3w5s'
usrRestful = 'usrRestful'
#https://blog.miguelgrinberg.com/post/restful-authentication-with-flask
from flask import Flask,request
app = Flask(__name__)
cors = CORS(app, resources={route + '*': {"origins": "*"}})
def checkCredentials(usr, pwd):
if usr == usrRestful and pwd == pwdRestful:
return True
else:
return False
def errorMsg():
return abort(400)#"""{\"msg\":\"error\"}"""
def okMsg():
return """{\"msg\":\"ok\"}"""
def addressStatusRequest(address):
    print('address postgres req')
    conn = psycopg2.connect(conn_string)
    cursor = conn.cursor()
    # Parameterized query avoids SQL injection via user-supplied values.
    q = 'select status from public."addresses" where address = %s'
    cursor.execute(q, (address,))
    print(q)
    s = ""
    rows = cursor.fetchall()
    if cursor.rowcount == 0:
        conn.close()
        return """{\"status\":\"0\"}"""
    else:
        for row in rows:
            s = """{\"status\":\"""" + str(row[0]) + """\"}"""
        conn.close()
        return s
@app.route(route + 'newAddress',methods=['GET'])
def newAddress():
key = request.args.get('key')
child = request.args.get('child')
return createKey(key,child)
@app.route(route + 'addressStatus',methods=['GET'])
def addressStatus():
address = request.args.get('address')
print(address)
return addressStatusRequest(address)
@app.route(route + 'postAddress',methods=['POST'])
def postAddress():
address = request.args.get('address')
amount = request.args.get('amount')
return recordAddress(address,amount)
@app.route(route + 'getNewTransaction',methods=['GET'])
def createNewChildWallet2():
#request.args.get('id')
publicKey = request.args.get('publicKey')
publicChildNum = request.args.get('publicChildNum')
print(publicKey)
print(publicChildNum)
return createKey(publicKey,publicChildNum)
@app.route(route + 'getNewChild',methods=['GET'])
def createNewChildWallet():
publicKey = request.args.get('publicKey')
publicChildNum = request.args.get('publicChildNum')
print(publicKey)
print(publicChildNum)
return createKey(publicKey,publicChildNum)
#http://localhost:1500/api/[email protected]
@app.route(route + 'createWallet',methods=['GET'])
def createWallet():
    print('handling create wallet')
#request.args.get('id')
w = wall3()
privatek = w["private_key"]
publick = w["public_key"]
address = w["address"]
seed = w["seed"]
coin = w["coin"]
address3 = keyWith3(publick)
xprivate_key = w["xprivate_key"]
xpublic_key = w["xpublic_key"]
xpublic_key_prime = w["xpublic_key_prime"]
json = '{ "coin":"'+coin+'", ' + '"private_key" : "'+privatek+'", "public_key" :' +'"'+publick+'", ' + '"address" : "'+address+'", "Multi_address" : "'+address3+'", "seed":"' + seed +'", '
json = json + '"xprivate_key":"'+xprivate_key+'", "xpublic_key" : "' + xpublic_key + '", "xpublic_key_prime" :' + '"' + xpublic_key_prime + '" }'
print(json)
return json
@app.route(route + 'get',methods=['GET'])
def getMsgs():
    id = request.args.get('id')
    print('get messages from: ' + id)
    return getMessages(id)
@app.route(route + 'post',methods=['POST'])
def postMsg():
print('posting message')
req_data = request.get_json()
print(req_data)
return """{\"msg\":\"ok\"}"""#requests.status_codes._codes[200]
if __name__ == '__main__':
#app.run(debug=True)
print("starting chat service MAINNET ")
#app.run(ssl_context='adhoc',host='0.0.0.0', port=1700)
app.run(host='0.0.0.0', port=1700)
#createKey('tpubD6NzVbkrYhZ4X3ytjTHoSnmHsUdgXiBTm4LQh6FXXGi7uqRVmvR4h8poyTbnxXrDm9xhpqV8ioTJ884wQ7mvaDZCBvsYRa1fCMSJrW7U1Bp',2)
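# Illustrative client sketch (assumes the service above is listening on port 1700;
# the xpub value below is a placeholder):
#
#   import requests
#   r = requests.get('http://localhost:1700/api/mobil/newAddress',
#                    params={'key': '<xpub>', 'child': 2})
#   print(r.text)  # JSON string produced by createKey()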
|
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Performance runner for d8.
Call e.g. with tools/run-perf.py --arch ia32 some_suite.json
The suite json format is expected to be:
{
"path": <relative path chunks to perf resources and main file>,
"name": <optional suite name, file name is default>,
"archs": [<architecture name for which this suite is run>, ...],
"binary": <name of binary to run, default "d8">,
"flags": [<flag to d8>, ...],
"test_flags": [<flag to the test file>, ...],
"run_count": <how often will this suite run (optional)>,
"run_count_XXX": <how often will this suite run for arch XXX (optional)>,
"resources": [<js file to be moved to android device>, ...]
"main": <main js perf runner file>,
"results_regexp": <optional regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
"tests": [
{
"name": <name of the trace>,
"results_regexp": <optional more specific regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
}, ...
]
}
The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.
A suite's results_regexp is expected to have one string place holder
"%s" for the trace name. A trace's results_regexp overwrites suite
defaults.
A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests like this (with a path
relative to the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>
The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.
A suite without "tests" is considered a performance test itself.
Full example (suite with one runner):
{
"path": ["."],
"flags": ["--expose-gc"],
"test_flags": ["5"],
"archs": ["ia32", "x64"],
"run_count": 5,
"run_count_ia32": 3,
"main": "run.js",
"results_regexp": "^%s: (.+)$",
"units": "score",
"tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
{"name": "NavierStokes",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Full example (suite with several runners):
{
"path": ["."],
"flags": ["--expose-gc"],
"archs": ["ia32", "x64"],
"run_count": 5,
"units": "score",
"tests": [
{"name": "Richards",
"path": ["richards"],
"main": "run.js",
"run_count": 3,
"results_regexp": "^Richards: (.+)$"},
{"name": "NavierStokes",
"path": ["navier_stokes"],
"main": "run.js",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Path pieces are concatenated. D8 is always run with the suite's path as cwd.
The test flags are passed to the js test file after '--'.
"""
from collections import OrderedDict
import json
import logging
import math
import optparse
import os
import re
import sys
from testrunner.local import commands
from testrunner.local import utils
ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["android_arm",
"android_arm64",
"android_ia32",
"android_x64",
"arm",
"ia32",
"mips",
"mipsel",
"nacl_ia32",
"nacl_x64",
"x64",
"arm64"]
GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
def LoadAndroidBuildTools(path): # pragma: no cover
assert os.path.exists(path)
sys.path.insert(0, path)
from pylib.device import device_utils # pylint: disable=F0401
from pylib.device import device_errors # pylint: disable=F0401
from pylib.perf import cache_control # pylint: disable=F0401
from pylib.perf import perf_control # pylint: disable=F0401
import pylib.android_commands # pylint: disable=F0401
global cache_control
global device_errors
global device_utils
global perf_control
global pylib
def GeometricMean(values):
"""Returns the geometric mean of a list of values.
The mean is calculated using log to avoid overflow.
"""
values = map(float, values)
return str(math.exp(sum(map(math.log, values)) / len(values)))
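# Worked example: GeometricMean(["2", "8"]) returns "4.0", because
# exp((ln 2 + ln 8) / 2) = exp(ln 4) = 4.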
class Results(object):
"""Place holder for result traces."""
def __init__(self, traces=None, errors=None):
self.traces = traces or []
self.errors = errors or []
def ToDict(self):
return {"traces": self.traces, "errors": self.errors}
def WriteToFile(self, file_name):
with open(file_name, "w") as f:
f.write(json.dumps(self.ToDict()))
def __add__(self, other):
self.traces += other.traces
self.errors += other.errors
return self
def __str__(self): # pragma: no cover
return str(self.ToDict())
class Node(object):
"""Represents a node in the suite tree structure."""
def __init__(self, *args):
self._children = []
def AppendChild(self, child):
self._children.append(child)
class DefaultSentinel(Node):
"""Fake parent node with all default values."""
def __init__(self):
super(DefaultSentinel, self).__init__()
self.binary = "d8"
self.run_count = 10
self.timeout = 60
self.path = []
self.graphs = []
self.flags = []
self.test_flags = []
self.resources = []
self.results_regexp = None
self.stddev_regexp = None
self.units = "score"
self.total = False
class Graph(Node):
"""Represents a suite definition.
Can either be a leaf or an inner node that provides default values.
"""
def __init__(self, suite, parent, arch):
super(Graph, self).__init__()
self._suite = suite
assert isinstance(suite.get("path", []), list)
assert isinstance(suite["name"], basestring)
assert isinstance(suite.get("flags", []), list)
assert isinstance(suite.get("test_flags", []), list)
assert isinstance(suite.get("resources", []), list)
# Accumulated values.
self.path = parent.path[:] + suite.get("path", [])
self.graphs = parent.graphs[:] + [suite["name"]]
self.flags = parent.flags[:] + suite.get("flags", [])
self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])
# Values independent of parent node.
self.resources = suite.get("resources", [])
    # Discrete values (with parent defaults).
self.binary = suite.get("binary", parent.binary)
self.run_count = suite.get("run_count", parent.run_count)
self.run_count = suite.get("run_count_%s" % arch, self.run_count)
self.timeout = suite.get("timeout", parent.timeout)
self.timeout = suite.get("timeout_%s" % arch, self.timeout)
self.units = suite.get("units", parent.units)
self.total = suite.get("total", parent.total)
# A regular expression for results. If the parent graph provides a
# regexp and the current suite has none, a string place holder for the
# suite name is expected.
    # TODO(machenbach): Currently that only makes sense for the leaf level.
# Multiple place holders for multiple levels are not supported.
if parent.results_regexp:
regexp_default = parent.results_regexp % re.escape(suite["name"])
else:
regexp_default = None
self.results_regexp = suite.get("results_regexp", regexp_default)
# A similar regular expression for the standard deviation (optional).
if parent.stddev_regexp:
stddev_default = parent.stddev_regexp % re.escape(suite["name"])
else:
stddev_default = None
self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
class Trace(Graph):
"""Represents a leaf in the suite tree structure.
Handles collection of measurements.
"""
def __init__(self, suite, parent, arch):
super(Trace, self).__init__(suite, parent, arch)
assert self.results_regexp
self.results = []
self.errors = []
self.stddev = ""
def ConsumeOutput(self, stdout):
try:
result = re.search(self.results_regexp, stdout, re.M).group(1)
self.results.append(str(float(result)))
except ValueError:
self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
% (self.results_regexp, self.graphs[-1]))
except:
self.errors.append("Regexp \"%s\" didn't match for test %s."
% (self.results_regexp, self.graphs[-1]))
try:
if self.stddev_regexp and self.stddev:
self.errors.append("Test %s should only run once since a stddev "
"is provided by the test." % self.graphs[-1])
if self.stddev_regexp:
self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
except:
self.errors.append("Regexp \"%s\" didn't match for test %s."
% (self.stddev_regexp, self.graphs[-1]))
def GetResults(self):
return Results([{
"graphs": self.graphs,
"units": self.units,
"results": self.results,
"stddev": self.stddev,
}], self.errors)
class Runnable(Graph):
"""Represents a runnable suite definition (i.e. has a main file).
"""
@property
def main(self):
return self._suite.get("main", "")
def ChangeCWD(self, suite_path):
"""Changes the cwd to to path defined in the current graph.
The tests are supposed to be relative to the suite configuration.
"""
suite_dir = os.path.abspath(os.path.dirname(suite_path))
bench_dir = os.path.normpath(os.path.join(*self.path))
os.chdir(os.path.join(suite_dir, bench_dir))
def GetCommandFlags(self):
suffix = ["--"] + self.test_flags if self.test_flags else []
return self.flags + [self.main] + suffix
def GetCommand(self, shell_dir):
# TODO(machenbach): This requires +.exe if run on windows.
return [os.path.join(shell_dir, self.binary)] + self.GetCommandFlags()
def Run(self, runner):
"""Iterates over several runs and handles the output for all traces."""
for stdout in runner():
for trace in self._children:
trace.ConsumeOutput(stdout)
res = reduce(lambda r, t: r + t.GetResults(), self._children, Results())
if not res.traces or not self.total:
return res
# Assume all traces have the same structure.
if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
res.errors.append("Not all traces have the same number of results.")
return res
# Calculate the geometric means for all traces. Above we made sure that
# there is at least one trace and that the number of results is the same
# for each trace.
n_results = len(res.traces[0]["results"])
total_results = [GeometricMean(t["results"][i] for t in res.traces)
for i in range(0, n_results)]
res.traces.append({
"graphs": self.graphs + ["Total"],
"units": res.traces[0]["units"],
"results": total_results,
"stddev": "",
})
return res
class RunnableTrace(Trace, Runnable):
"""Represents a runnable suite definition that is a leaf."""
def __init__(self, suite, parent, arch):
super(RunnableTrace, self).__init__(suite, parent, arch)
def Run(self, runner):
"""Iterates over several runs and handles the output."""
for stdout in runner():
self.ConsumeOutput(stdout)
return self.GetResults()
class RunnableGeneric(Runnable):
"""Represents a runnable suite definition with generic traces."""
def __init__(self, suite, parent, arch):
super(RunnableGeneric, self).__init__(suite, parent, arch)
def Run(self, runner):
"""Iterates over several runs and handles the output."""
traces = OrderedDict()
for stdout in runner():
for line in stdout.strip().splitlines():
match = GENERIC_RESULTS_RE.match(line)
if match:
stddev = ""
graph = match.group(1)
trace = match.group(2)
body = match.group(3)
units = match.group(4)
match_stddev = RESULT_STDDEV_RE.match(body)
match_list = RESULT_LIST_RE.match(body)
errors = []
if match_stddev:
result, stddev = map(str.strip, match_stddev.group(1).split(","))
results = [result]
elif match_list:
results = map(str.strip, match_list.group(1).split(","))
else:
results = [body.strip()]
try:
results = map(lambda r: str(float(r)), results)
except ValueError:
results = []
errors = ["Found non-numeric in %s" %
"/".join(self.graphs + [graph, trace])]
trace_result = traces.setdefault(trace, Results([{
"graphs": self.graphs + [graph, trace],
"units": (units or self.units).strip(),
"results": [],
"stddev": "",
}], errors))
trace_result.traces[0]["results"].extend(results)
trace_result.traces[0]["stddev"] = stddev
return reduce(lambda r, t: r + t, traces.itervalues(), Results())
def MakeGraph(suite, arch, parent):
"""Factory method for making graph objects."""
if isinstance(parent, Runnable):
    # Below a runnable there can only be traces.
return Trace(suite, parent, arch)
elif suite.get("main"):
# A main file makes this graph runnable.
if suite.get("tests"):
# This graph has subgraphs (traces).
return Runnable(suite, parent, arch)
else:
# This graph has no subgraphs, it's a leaf.
return RunnableTrace(suite, parent, arch)
elif suite.get("generic"):
# This is a generic suite definition. It is either a runnable executable
# or has a main js file.
return RunnableGeneric(suite, parent, arch)
elif suite.get("tests"):
# This is neither a leaf nor a runnable.
return Graph(suite, parent, arch)
else: # pragma: no cover
raise Exception("Invalid suite configuration.")
def BuildGraphs(suite, arch, parent=None):
"""Builds a tree structure of graph objects that corresponds to the suite
configuration.
"""
parent = parent or DefaultSentinel()
# TODO(machenbach): Implement notion of cpu type?
if arch not in suite.get("archs", SUPPORTED_ARCHS):
return None
graph = MakeGraph(suite, arch, parent)
for subsuite in suite.get("tests", []):
BuildGraphs(subsuite, arch, graph)
parent.AppendChild(graph)
return graph
def FlattenRunnables(node, node_cb):
"""Generator that traverses the tree structure and iterates over all
runnables.
"""
node_cb(node)
if isinstance(node, Runnable):
yield node
elif isinstance(node, Node):
for child in node._children:
for result in FlattenRunnables(child, node_cb):
yield result
else: # pragma: no cover
raise Exception("Invalid suite configuration.")
class Platform(object):
@staticmethod
def GetPlatform(options):
if options.arch.startswith("android"):
return AndroidPlatform(options)
else:
return DesktopPlatform(options)
class DesktopPlatform(Platform):
def __init__(self, options):
self.shell_dir = options.shell_dir
def PreExecution(self):
pass
def PostExecution(self):
pass
def PreTests(self, node, path):
if isinstance(node, Runnable):
node.ChangeCWD(path)
def Run(self, runnable, count):
output = commands.Execute(runnable.GetCommand(self.shell_dir),
timeout=runnable.timeout)
print ">>> Stdout (#%d):" % (count + 1)
print output.stdout
if output.stderr: # pragma: no cover
# Print stderr for debugging.
print ">>> Stderr (#%d):" % (count + 1)
print output.stderr
if output.timed_out:
print ">>> Test timed out after %ss." % runnable.timeout
return output.stdout
class AndroidPlatform(Platform): # pragma: no cover
DEVICE_DIR = "/data/local/tmp/v8/"
def __init__(self, options):
self.shell_dir = options.shell_dir
LoadAndroidBuildTools(options.android_build_tools)
if not options.device:
# Detect attached device if not specified.
devices = pylib.android_commands.GetAttachedDevices(
hardware=True, emulator=False, offline=False)
assert devices and len(devices) == 1, (
"None or multiple devices detected. Please specify the device on "
"the command-line with --device")
options.device = devices[0]
adb_wrapper = pylib.android_commands.AndroidCommands(options.device)
self.device = device_utils.DeviceUtils(adb_wrapper)
self.adb = adb_wrapper.Adb()
def PreExecution(self):
perf = perf_control.PerfControl(self.device)
perf.SetHighPerfMode()
# Remember what we have already pushed to the device.
self.pushed = set()
def PostExecution(self):
perf = perf_control.PerfControl(self.device)
perf.SetDefaultPerfMode()
self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])
def _SendCommand(self, cmd):
logging.info("adb -s %s %s" % (str(self.device), cmd))
return self.adb.SendCommand(cmd, timeout_time=60)
def _PushFile(self, host_dir, file_name, target_rel=".",
skip_if_missing=False):
file_on_host = os.path.join(host_dir, file_name)
file_on_device_tmp = os.path.join(
AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
file_on_device = os.path.join(
AndroidPlatform.DEVICE_DIR, target_rel, file_name)
folder_on_device = os.path.dirname(file_on_device)
# Only attempt to push files that exist.
if not os.path.exists(file_on_host):
if not skip_if_missing:
logging.critical('Missing file on host: %s' % file_on_host)
return
# Only push files not yet pushed in one execution.
if file_on_host in self.pushed:
return
else:
self.pushed.add(file_on_host)
# Work-around for "text file busy" errors. Push the files to a temporary
# location and then copy them with a shell command.
output = self._SendCommand(
"push %s %s" % (file_on_host, file_on_device_tmp))
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
# Errors look like this: "failed to copy ... ".
if output and not re.search('^[0-9]', output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + output)
self._SendCommand("shell mkdir -p %s" % folder_on_device)
self._SendCommand("shell cp %s %s" % (file_on_device_tmp, file_on_device))
def PreTests(self, node, path):
suite_dir = os.path.abspath(os.path.dirname(path))
if node.path:
bench_rel = os.path.normpath(os.path.join(*node.path))
bench_abs = os.path.join(suite_dir, bench_rel)
else:
bench_rel = "."
bench_abs = suite_dir
self._PushFile(self.shell_dir, node.binary)
# Push external startup data. Backwards compatible for revisions where
# these files didn't exist.
self._PushFile(self.shell_dir, "natives_blob.bin", skip_if_missing=True)
self._PushFile(self.shell_dir, "snapshot_blob.bin", skip_if_missing=True)
if isinstance(node, Runnable):
self._PushFile(bench_abs, node.main, bench_rel)
for resource in node.resources:
self._PushFile(bench_abs, resource, bench_rel)
def Run(self, runnable, count):
cache = cache_control.CacheControl(self.device)
cache.DropRamCaches()
binary_on_device = AndroidPlatform.DEVICE_DIR + runnable.binary
cmd = [binary_on_device] + runnable.GetCommandFlags()
# Relative path to benchmark directory.
if runnable.path:
bench_rel = os.path.normpath(os.path.join(*runnable.path))
else:
bench_rel = "."
try:
output = self.device.RunShellCommand(
cmd,
cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
timeout=runnable.timeout,
retries=0,
)
stdout = "\n".join(output)
print ">>> Stdout (#%d):" % (count + 1)
print stdout
except device_errors.CommandTimeoutError:
print ">>> Test timed out after %ss." % runnable.timeout
stdout = ""
return stdout
# TODO: Implement results_processor.
def Main(args):
logging.getLogger().setLevel(logging.INFO)
parser = optparse.OptionParser()
parser.add_option("--android-build-tools",
help="Path to chromium's build/android.")
parser.add_option("--arch",
help=("The architecture to run tests for, "
"'auto' or 'native' for auto-detect"),
default="x64")
parser.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
parser.add_option("--device",
help="The device ID to run Android tests on. If not given "
"it will be autodetected.")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--outdir", help="Base directory with compile output",
default="out")
(options, args) = parser.parse_args(args)
if len(args) == 0: # pragma: no cover
parser.print_help()
return 1
if options.arch in ["auto", "native"]: # pragma: no cover
options.arch = ARCH_GUESS
if not options.arch in SUPPORTED_ARCHS: # pragma: no cover
print "Unknown architecture %s" % options.arch
return 1
if (bool(options.arch.startswith("android")) !=
bool(options.android_build_tools)): # pragma: no cover
    print ("Android architectures require --android-build-tools and vice versa.")
return 1
if (options.device and not
options.arch.startswith("android")): # pragma: no cover
print "Specifying a device requires an Android architecture to be used."
return 1
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if options.buildbot:
options.shell_dir = os.path.join(workspace, options.outdir, "Release")
else:
options.shell_dir = os.path.join(workspace, options.outdir,
"%s.release" % options.arch)
platform = Platform.GetPlatform(options)
results = Results()
for path in args:
path = os.path.abspath(path)
if not os.path.exists(path): # pragma: no cover
results.errors.append("Configuration file %s does not exist." % path)
continue
with open(path) as f:
suite = json.loads(f.read())
# If no name is given, default to the file name without .json.
suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
# Setup things common to one test suite.
platform.PreExecution()
# Build the graph/trace tree structure.
root = BuildGraphs(suite, options.arch)
# Callback to be called on each node on traversal.
def NodeCB(node):
platform.PreTests(node, path)
    # Traverse the graph/trace tree and iterate over all runnables.
for runnable in FlattenRunnables(root, NodeCB):
print ">>> Running suite: %s" % "/".join(runnable.graphs)
def Runner():
"""Output generator that reruns several times."""
for i in xrange(0, max(1, runnable.run_count)):
# TODO(machenbach): Allow timeout per arch like with run_count per
# arch.
yield platform.Run(runnable, i)
# Let runnable iterate over all runs and handle output.
results += runnable.Run(Runner)
platform.PostExecution()
if options.json_test_results:
results.WriteToFile(options.json_test_results)
else: # pragma: no cover
print results
return min(1, len(results.errors))
if __name__ == "__main__": # pragma: no cover
sys.exit(Main(sys.argv[1:]))
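# Invocation sketch (the script and suite names below are hypothetical; the real file
# name is whatever this module is saved as): each positional argument is a JSON suite
# configuration, and results are printed unless --json-test-results is given.
#
#   python run_perf.py --arch x64 --outdir out benchmarks/my_suite.json
#   python run_perf.py --arch android_arm --android-build-tools ../build/android \
#       --device 0123456789ABCDEF benchmarks/my_suite.json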
|
|
import itsdangerous
from django.middleware.csrf import get_token
from django.utils.translation import ugettext_lazy as _
import waffle
from rest_framework import authentication
from rest_framework.authentication import BasicAuthentication, CSRFCheck
from rest_framework import exceptions
from addons.twofactor.models import UserSettings as TwoFactorUserSettings
from api.base import settings as api_settings
from api.base.exceptions import (
UnconfirmedAccountError, UnclaimedAccountError, DeactivatedAccountError,
MergedAccountError, InvalidAccountError, TwoFactorRequiredError,
)
from framework.auth import cas
from framework.auth.core import get_user
from osf import features
from osf.models import OSFUser, Session
from osf.utils.fields import ensure_str
from website import settings
def get_session_from_cookie(cookie_val):
"""
Given a cookie value, return the `Session` object or `None`.
:param cookie_val: the cookie
:return: the `Session` object or None
"""
try:
session_id = ensure_str(itsdangerous.Signer(settings.SECRET_KEY).unsign(cookie_val))
except itsdangerous.BadSignature:
return None
try:
session = Session.objects.get(_id=session_id)
return session
except Session.DoesNotExist:
return None
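# Illustration (hypothetical values) of the round trip that get_session_from_cookie()
# relies on: the web application signs the session id with itsdangerous.Signer and
# settings.SECRET_KEY, and this helper unsigns it to recover the Session id.
#
#   signer = itsdangerous.Signer('hypothetical-secret-key')
#   cookie_val = signer.sign('hypothetical-session-id')  # value stored in the OSF cookie
#   ensure_str(signer.unsign(cookie_val))                # -> 'hypothetical-session-id'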
def check_user(user):
"""
Check and verify user status.
                                 registered  confirmed  disabled  merged  usable-password
    ACTIVE                   :       x           x         o        o           x
    NOT_CONFIRMED (default)  :       o           o         o        o           x
    NOT_CONFIRMED (external) :       o           o         o        o           o
    NOT_CLAIMED              :       o           o         o        o           o
    DISABLED                 :       x           x         x        o           x
    USER_MERGED              :       x           x         o        x           o
Unlike users created via username-password signup, unconfirmed accounts created by an external
IdP (e.g. ORCiD Login) have unusable passwords. To detect them, check the ``external_identity``
property of the user object. See ``created_by_external_idp_and_unconfirmed()`` for details.
:param user: the user object to check
    :raises `UnconfirmedAccountError` if the user was created via default username / password
sign-up, or if via ORCiD login with pending status "LINK" or "CREATE" to confirm
:raises `UnclaimedAccountError` if the user was created as an unregistered contributor of a
project or group waiting to be claimed
:raises `DeactivatedAccountError` if the user has been disabled / deactivated
:raises `MergedAccountError` if the user has been merged into another account
:raises `InvalidAccountError` if the user is not active and not of the expected inactive status
:returns nothing if user is active and no exception is raised
"""
# An active user must be registered, claimed, confirmed, not merged, not disabled, and either
# has a usable password or has a verified external identity.
if user.is_active:
return
# The user has been disabled / deactivated
if user.is_disabled:
raise DeactivatedAccountError
# The user has been merged into another one
if user.is_merged:
raise MergedAccountError
# The user has not been confirmed or claimed
if not user.is_confirmed and not user.is_registered:
if user.has_usable_password() or created_by_external_idp_and_unconfirmed(user):
raise UnconfirmedAccountError
raise UnclaimedAccountError
# For all other cases, the user status is invalid. Although such status can't be reached with
# normal user-facing web application flow, it is still possible as a result of direct database
# access, coding bugs, database corruption, etc.
raise InvalidAccountError
def created_by_external_idp_and_unconfirmed(user):
"""Check if the user is created by external IdP and unconfirmed.
    There are only three possible values that indicate the status of a user's external identity:
'LINK', 'CREATE' and 'VERIFIED'. Only 'CREATE' indicates that the user is newly created by an
external IdP and is unconfirmed.
"""
return 'CREATE' in set(sum([list(each.values()) for each in list(user.external_identity.values())], []))
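# Illustrative shape of ``user.external_identity`` assumed by the check above
# (hypothetical data): a mapping of provider -> {external id -> status}.
#
#   {'ORCID': {'0000-0001-2345-6789': 'CREATE'}}
#
# Flattening every status value yields ['CREATE'], so the function returns True;
# an already verified identity ({'ORCID': {...: 'VERIFIED'}}) yields False.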
# Three customized DRF authentication classes: basic, session/cookie and access token.
# See http://www.django-rest-framework.org/api-guide/authentication/#custom-authentication
class OSFSessionAuthentication(authentication.BaseAuthentication):
"""
Custom DRF authentication class for API call with OSF cookie/session.
"""
def authenticate(self, request):
"""
If request bears an OSF cookie, retrieve the session and verify the user.
:param request: the request
:return: the user
"""
cookie_val = request.COOKIES.get(settings.COOKIE_NAME)
if not cookie_val:
return None
session = get_session_from_cookie(cookie_val)
if not session:
return None
user_id = session.data.get('auth_user_id')
user = OSFUser.load(user_id)
if user:
if waffle.switch_is_active(features.ENFORCE_CSRF):
self.enforce_csrf(request)
# CSRF passed with authenticated user
check_user(user)
return user, None
return None
def enforce_csrf(self, request):
"""
Same implementation as django-rest-framework's SessionAuthentication.
Enforce CSRF validation for session based authentication.
"""
reason = CSRFCheck().process_view(request, None, (), {})
if reason:
# CSRF failed, bail with explicit error message
raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)
if not request.COOKIES.get(api_settings.CSRF_COOKIE_NAME):
# Make sure the CSRF cookie is set for next time
get_token(request)
class OSFBasicAuthentication(BasicAuthentication):
"""
Custom DRF authentication class for API call with email, password, and two-factor if necessary.
"""
def authenticate(self, request):
"""
Overwrite BasicAuthentication to authenticate by email, password and two-factor code.
`authenticate_credentials` handles email and password,
`authenticate_twofactor_credentials` handles two-factor.
:param request: the request
:return: a tuple of the user and error messages
"""
user_auth_tuple = super(OSFBasicAuthentication, self).authenticate(request)
if user_auth_tuple is not None:
self.authenticate_twofactor_credentials(user_auth_tuple[0], request)
return user_auth_tuple
def authenticate_credentials(self, userid, password, request=None):
"""
Authenticate the user by userid (email) and password.
:param userid: the username or email
:param password: the password
:return: the User
:raises: NotAuthenticated
:raises: AuthenticationFailed
"""
user = get_user(email=userid, password=password)
if userid and not user:
raise exceptions.AuthenticationFailed(_('Invalid username/password.'))
elif userid is None or not password:
raise exceptions.NotAuthenticated()
check_user(user)
return user, None
@staticmethod
def authenticate_twofactor_credentials(user, request):
"""
Authenticate the user's two-factor one time password code.
:param user: the user
:param request: the request
:raises TwoFactorRequiredError
:raises AuthenticationFailed
"""
try:
two_factor = TwoFactorUserSettings.objects.get(owner_id=user.pk)
except TwoFactorUserSettings.DoesNotExist:
two_factor = None
if two_factor and two_factor.is_confirmed:
otp = request.META.get('HTTP_X_OSF_OTP')
if otp is None:
raise TwoFactorRequiredError()
if not two_factor.verify_code(otp):
raise exceptions.AuthenticationFailed(_('Invalid two-factor authentication OTP code.'))
def authenticate_header(self, request):
"""
        Return a custom value other than "Basic" to prevent the browser's BasicAuth dialog prompt when returning a 401.
"""
return 'Documentation realm="{}"'.format(self.www_authenticate_realm)
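# Client-side sketch (hypothetical URL and credentials) of how OSFBasicAuthentication
# is exercised: the email/password go in a standard ``Authorization: Basic`` header,
# and the one-time password checked by authenticate_twofactor_credentials() is read
# from ``request.META['HTTP_X_OSF_OTP']``, i.e. an ``X-OSF-OTP`` request header.
#
#   import requests  # assumption: a generic HTTP client, not part of this module
#   requests.get(
#       'https://api.example.org/v2/',
#       auth=('user@example.org', 'password'),
#       headers={'X-OSF-OTP': '123456'},
#   )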
class OSFCASAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
"""
Check whether the request provides a valid OAuth2 bearer token.
The `user` in `cas_auth_response` is the unique GUID of the user. Please do not use
the primary key `id` or the email `username`.
:param request: the request
        :return: the user who owns the bearer token and the CAS response
"""
client = cas.get_client()
try:
auth_header_field = request.META['HTTP_AUTHORIZATION']
auth_token = cas.parse_auth_header(auth_header_field)
except (cas.CasTokenError, KeyError):
return None
try:
cas_auth_response = client.profile(auth_token)
except cas.CasHTTPError:
raise exceptions.NotAuthenticated(_('User provided an invalid OAuth2 access token'))
if cas_auth_response.authenticated is False:
raise exceptions.NotAuthenticated(_('CAS server failed to authenticate this token'))
user = OSFUser.load(cas_auth_response.user)
if not user:
raise exceptions.AuthenticationFailed(_('Could not find the user associated with this token'))
check_user(user)
return user, cas_auth_response
def authenticate_header(self, request):
"""
Return an empty string.
"""
return ''
|
|
"""Support for DoorBird devices."""
import asyncio
import logging
import urllib
from urllib.error import HTTPError
from aiohttp import web
from doorbirdpy import DoorBird
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICES,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TOKEN,
CONF_USERNAME,
HTTP_OK,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from homeassistant.util import dt as dt_util, slugify
from .const import (
CONF_EVENTS,
DOMAIN,
DOOR_STATION,
DOOR_STATION_EVENT_ENTITY_IDS,
DOOR_STATION_INFO,
PLATFORMS,
)
from .util import get_doorstation_by_token
_LOGGER = logging.getLogger(__name__)
API_URL = f"/api/{DOMAIN}"
CONF_CUSTOM_URL = "hass_url_override"
RESET_DEVICE_FAVORITES = "doorbird_reset_favorites"
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_EVENTS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_CUSTOM_URL): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [DEVICE_SCHEMA])}
)
},
extra=vol.ALLOW_EXTRA,
)
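# Example of the YAML shape accepted by CONFIG_SCHEMA above. Values are placeholders,
# and the key for CONF_EVENTS comes from .const (assumed here to be "events"):
#
#   doorbird:
#     devices:
#       - host: 192.168.1.2
#         username: my_user
#         password: my_password
#         token: my_callback_token
#         events:
#           - doorbell
#           - motion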
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the DoorBird component."""
hass.data.setdefault(DOMAIN, {})
# Provide an endpoint for the doorstations to call to trigger events
hass.http.register_view(DoorBirdRequestView)
if DOMAIN in config and CONF_DEVICES in config[DOMAIN]:
for index, doorstation_config in enumerate(config[DOMAIN][CONF_DEVICES]):
if CONF_NAME not in doorstation_config:
doorstation_config[CONF_NAME] = f"DoorBird {index + 1}"
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=doorstation_config
)
)
def _reset_device_favorites_handler(event):
"""Handle clearing favorites on device."""
token = event.data.get("token")
if token is None:
return
doorstation = get_doorstation_by_token(hass, token)
if doorstation is None:
_LOGGER.error("Device not found for provided token.")
return
# Clear webhooks
favorites = doorstation.device.favorites()
for favorite_type in favorites:
for favorite_id in favorites[favorite_type]:
doorstation.device.delete_favorite(favorite_type, favorite_id)
hass.bus.async_listen(RESET_DEVICE_FAVORITES, _reset_device_favorites_handler)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up DoorBird from a config entry."""
_async_import_options_from_data_if_missing(hass, entry)
doorstation_config = entry.data
doorstation_options = entry.options
config_entry_id = entry.entry_id
device_ip = doorstation_config[CONF_HOST]
username = doorstation_config[CONF_USERNAME]
password = doorstation_config[CONF_PASSWORD]
device = DoorBird(device_ip, username, password)
try:
status = await hass.async_add_executor_job(device.ready)
info = await hass.async_add_executor_job(device.info)
except urllib.error.HTTPError as err:
if err.code == 401:
_LOGGER.error(
"Authorization rejected by DoorBird for %s@%s", username, device_ip
)
return False
raise ConfigEntryNotReady
except OSError as oserr:
        _LOGGER.error("Failed to set up DoorBird at %s: %s", device_ip, oserr)
raise ConfigEntryNotReady
if not status[0]:
_LOGGER.error(
"Could not connect to DoorBird as %s@%s: Error %s",
username,
device_ip,
str(status[1]),
)
raise ConfigEntryNotReady
token = doorstation_config.get(CONF_TOKEN, config_entry_id)
custom_url = doorstation_config.get(CONF_CUSTOM_URL)
name = doorstation_config.get(CONF_NAME)
events = doorstation_options.get(CONF_EVENTS, [])
doorstation = ConfiguredDoorBird(device, name, events, custom_url, token)
# Subscribe to doorbell or motion events
if not await _async_register_events(hass, doorstation):
raise ConfigEntryNotReady
hass.data[DOMAIN][config_entry_id] = {
DOOR_STATION: doorstation,
DOOR_STATION_INFO: info,
}
entry.add_update_listener(_update_listener)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _async_register_events(hass, doorstation):
try:
await hass.async_add_executor_job(doorstation.register_events, hass)
except HTTPError:
hass.components.persistent_notification.create(
"Doorbird configuration failed. Please verify that API "
"Operator permission is enabled for the Doorbird user. "
"A restart will be required once permissions have been "
"verified.",
title="Doorbird Configuration Failure",
notification_id="doorbird_schedule_error",
)
return False
return True
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
config_entry_id = entry.entry_id
doorstation = hass.data[DOMAIN][config_entry_id][DOOR_STATION]
doorstation.events = entry.options[CONF_EVENTS]
# Subscribe to doorbell or motion events
await _async_register_events(hass, doorstation)
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
options = dict(entry.options)
modified = False
for importable_option in [CONF_EVENTS]:
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, options=options)
class ConfiguredDoorBird:
"""Attach additional information to pass along with configured device."""
def __init__(self, device, name, events, custom_url, token):
"""Initialize configured device."""
self._name = name
self._device = device
self._custom_url = custom_url
self.events = events
self.doorstation_events = [self._get_event_name(event) for event in self.events]
self._token = token
@property
def name(self):
"""Get custom device name."""
return self._name
@property
def device(self):
"""Get the configured device."""
return self._device
@property
def custom_url(self):
"""Get custom url for device."""
return self._custom_url
@property
def token(self):
"""Get token for device."""
return self._token
def register_events(self, hass):
"""Register events on device."""
# Get the URL of this server
hass_url = get_url(hass)
# Override url if another is specified in the configuration
if self.custom_url is not None:
hass_url = self.custom_url
for event in self.doorstation_events:
self._register_event(hass_url, event)
_LOGGER.info("Successfully registered URL for %s on %s", event, self.name)
@property
def slug(self):
"""Get device slug."""
return slugify(self._name)
def _get_event_name(self, event):
return f"{self.slug}_{event}"
def _register_event(self, hass_url, event):
"""Add a schedule entry in the device for a sensor."""
url = f"{hass_url}{API_URL}/{event}?token={self._token}"
# Register HA URL as webhook if not already, then get the ID
if not self.webhook_is_registered(url):
self.device.change_favorite("http", f"Home Assistant ({event})", url)
fav_id = self.get_webhook_id(url)
if not fav_id:
_LOGGER.warning(
'Could not find favorite for URL "%s". ' 'Skipping sensor "%s"',
url,
event,
)
return
def webhook_is_registered(self, url, favs=None) -> bool:
"""Return whether the given URL is registered as a device favorite."""
favs = favs if favs else self.device.favorites()
if "http" not in favs:
return False
for fav in favs["http"].values():
if fav["value"] == url:
return True
return False
def get_webhook_id(self, url, favs=None) -> str or None:
"""
Return the device favorite ID for the given URL.
The favorite must exist or there will be problems.
"""
favs = favs if favs else self.device.favorites()
if "http" not in favs:
return None
for fav_id in favs["http"]:
if favs["http"][fav_id]["value"] == url:
return fav_id
return None
def get_event_data(self):
"""Get data to pass along with HA event."""
return {
"timestamp": dt_util.utcnow().isoformat(),
"live_video_url": self._device.live_video_url,
"live_image_url": self._device.live_image_url,
"rtsp_live_video_url": self._device.rtsp_live_video_url,
"html5_viewer_url": self._device.html5_viewer_url,
}
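# Sketch of the round trip implemented by this class (hypothetical host, slug and
# token; DOMAIN is assumed to be "doorbird"): for a doorstation event named
# "mydoorbird_doorbell", _register_event() stores a device favorite of the form
#
#   http://hass.example.org:8123/api/doorbird/mydoorbird_doorbell?token=<token>
#
# When the DoorBird later calls that URL, DoorBirdRequestView below fires a
# "doorbird_mydoorbird_doorbell" event on the Home Assistant bus, carrying the
# payload returned by get_event_data().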
class DoorBirdRequestView(HomeAssistantView):
"""Provide a page for the device to call."""
requires_auth = False
url = API_URL
name = API_URL[1:].replace("/", ":")
extra_urls = [API_URL + "/{event}"]
async def get(self, request, event):
"""Respond to requests from the device."""
hass = request.app["hass"]
token = request.query.get("token")
device = get_doorstation_by_token(hass, token)
if device is None:
return web.Response(status=401, text="Invalid token provided.")
        event_data = device.get_event_data()
if event == "clear":
hass.bus.async_fire(RESET_DEVICE_FAVORITES, {"token": token})
message = f"HTTP Favorites cleared for {device.slug}"
return web.Response(status=HTTP_OK, text=message)
event_data[ATTR_ENTITY_ID] = hass.data[DOMAIN][
DOOR_STATION_EVENT_ENTITY_IDS
].get(event)
hass.bus.async_fire(f"{DOMAIN}_{event}", event_data)
return web.Response(status=HTTP_OK, text="OK")
|
|
#!/usr/bin/env python
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
from datacube.api.query import SortType
__author__ = "Simon Oldfield"
import calendar
import logging
import os
import gdal
import numpy
from datacube.api.model import DatasetType, Ls57Arg25Bands, Satellite, Ls8Arg25Bands
from datacube.api.utils import NDV, empty_array, get_dataset_metadata, get_dataset_data_with_pq, raster_create, \
get_dataset_data
from datacube.api.workflow import SummaryTask, CellTask, Workflow
_log = logging.getLogger()
class LandsatMosaicSummaryTask(SummaryTask):
def create_cell_tasks(self, x, y):
return LandsatMosaicCellTask(x=x, y=y, acq_min=self.acq_min, acq_max=self.acq_max,
satellites=self.satellites, output_directory=self.output_directory, csv=self.csv,
dummy=self.dummy, save_input_files=self.save_input_files, apply_pq_filter=self.apply_pq_filter)
class LandsatMosaicCellTask(CellTask):
def get_output_paths(self):
return [self.get_output_path(dataset=dataset)
#for dataset in ["NBAR", "SAT", "EPOCH"]]
for dataset in ["NBAR", "SAT", "DATE"]]
def get_output_path(self, dataset):
return os.path.join(self.output_directory,
"LS_{dataset}_{x:03d}_{y:04d}_{acq_min}_{acq_max}.tif".format(dataset=dataset, x=self.x,
y=self.y,
acq_min=self.acq_min,
acq_max=self.acq_max))
def doit(self):
shape = (4000, 4000)
no_data_value = NDV
best_pixel_data = dict()
# TODO
if Satellite.LS8.value in self.satellites:
bands = Ls8Arg25Bands
else:
bands = Ls57Arg25Bands
for band in bands:
best_pixel_data[band] = empty_array(shape=shape, dtype=numpy.int16, ndv=no_data_value)
best_pixel_satellite = empty_array(shape=shape, dtype=numpy.int16, ndv=NDV)
# best_pixel_epoch = empty_array(shape=shape, dtype=numpy.int32, ndv=NDV)
best_pixel_date = empty_array(shape=shape, dtype=numpy.int32, ndv=NDV)
current_satellite = empty_array(shape=shape, dtype=numpy.int16, ndv=NDV)
# current_epoch = empty_array(shape=shape, dtype=numpy.int32, ndv=NDV)
current_date = empty_array(shape=shape, dtype=numpy.int32, ndv=NDV)
metadata = None
SATELLITE_DATA_VALUES = {Satellite.LS5: 5, Satellite.LS7: 7, Satellite.LS8: 8}
for tile in self.get_tiles(sort=SortType.DESC):
# Get ARG25 dataset
dataset = tile.datasets[DatasetType.ARG25]
_log.info("Processing ARG tile [%s]", dataset.path)
if not metadata:
metadata = get_dataset_metadata(dataset)
band_data = None
if self.apply_pq_filter:
band_data = get_dataset_data_with_pq(dataset, tile.datasets[DatasetType.PQ25])
else:
band_data = get_dataset_data(dataset)
# Create the provenance datasets
# NOTE: need to do this BEFORE selecting the pixel since it is actually using the fact that the
# selected pixel currently doesn't have a value
# NOTE: band values are propagated "as a job lot" so can just check any band
# TODO better way than just saying....RED....?
band = bands.RED
# Satellite
current_satellite.fill(SATELLITE_DATA_VALUES[dataset.satellite])
best_pixel_satellite = numpy.where(best_pixel_data[band] == no_data_value, current_satellite, best_pixel_satellite)
# # Epoch dataset
#
# current_epoch.fill(calendar.timegm(tile.end_datetime.timetuple()))
# best_pixel_epoch = numpy.where(best_pixel_data[band] == no_data_value, current_epoch, best_pixel_epoch)
# Date dataset (20150101)
current_date.fill(tile.end_datetime.year * 10000 + tile.end_datetime.month * 100 + tile.end_datetime.day)
best_pixel_date = numpy.where(best_pixel_data[band] == no_data_value, current_date, best_pixel_date)
for band in bands:
data = band_data[band]
# _log.debug("data = \n%s", data)
# Replace any NO DATA best pixels with data pixels
# TODO should I explicitly do the AND data is not NO DATA VALUE?
best_pixel_data[band] = numpy.where(best_pixel_data[band] == no_data_value, data, best_pixel_data[band])
# _log.debug("best pixel = \n%s", best_pixel_data[band])
still_no_data = numpy.any(numpy.array([best_pixel_data[b] for b in bands]) == no_data_value)
# _log.debug("still no data pixels = %s", still_no_data)
if not still_no_data:
break
# Now want to mask out values in the provenance datasets if we haven't actually got a value
# TODO better way than just saying....RED....?
band = bands.RED
mask = numpy.ma.masked_equal(best_pixel_data[band], NDV).mask
best_pixel_satellite = numpy.ma.array(best_pixel_satellite, mask=mask).filled(NDV)
# best_pixel_epoch = numpy.ma.array(best_pixel_epoch, mask=mask).fill(NDV)
best_pixel_date = numpy.ma.array(best_pixel_date, mask=mask).filled(NDV)
# Composite NBAR dataset
raster_create(self.get_output_path("NBAR"), [best_pixel_data[b] for b in bands],
metadata.transform, metadata.projection, NDV, gdal.GDT_Int16)
# Provenance (satellite) dataset
raster_create(self.get_output_path("SAT"),
[best_pixel_satellite],
metadata.transform, metadata.projection, no_data_value,
gdal.GDT_Int16)
# # Provenance (epoch) dataset
#
# raster_create(self.get_output_path("EPOCH"),
# [best_pixel_epoch],
# metadata.transform, metadata.projection, no_data_value,
# gdal.GDT_Int32)
        # Provenance (date) dataset
raster_create(self.get_output_path("DATE"),
[best_pixel_date],
metadata.transform, metadata.projection, no_data_value,
gdal.GDT_Int32)
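# Standalone sketch (hypothetical arrays) of the compositing rule used in doit():
# a pixel keeps the first valid observation it receives, because numpy.where only
# overwrites cells that are still at the NO DATA value.
#
#   import numpy
#   NO_DATA = -999
#   best = numpy.array([NO_DATA, 10, NO_DATA])
#   new = numpy.array([5, 99, NO_DATA])
#   best = numpy.where(best == NO_DATA, new, best)  # -> array([   5,   10, -999])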
class LandsatMosaicWorkflow(Workflow):
def __init__(self):
Workflow.__init__(self, application_name="Landsat Mosaic")
def create_summary_tasks(self):
return [LandsatMosaicSummaryTask(x_min=self.x_min, x_max=self.x_max, y_min=self.y_min, y_max=self.y_max,
acq_min=self.acq_min, acq_max=self.acq_max, satellites=self.satellites,
output_directory=self.output_directory, csv=self.csv, dummy=self.dummy,
save_input_files=self.save_input_files, apply_pq_filter=self.apply_pq_filter)]
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
LandsatMosaicWorkflow().run()
|
|
"""Elliptical geometrical entities.
Contains
* Ellipse
* Circle
"""
from __future__ import print_function, division
from sympy.core import S, C, sympify, pi, Dummy
from sympy.core.logic import fuzzy_bool
from sympy.core.numbers import oo, zoo, Rational
from sympy.simplify import simplify, trigsimp
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.functions.elementary.complexes import im
from sympy.geometry.exceptions import GeometryError
from sympy.polys import Poly, PolynomialError, DomainError
from sympy.solvers import solve
from sympy.utilities.lambdify import lambdify
from sympy.utilities.iterables import uniq
from sympy.utilities.misc import filldedent
from .entity import GeometryEntity
from .point import Point
from .line import LinearEntity, Line
from .util import _symbol, idiff
from sympy.mpmath import findroot as nroot
import random
from sympy.utilities.decorator import doctest_depends_on
class Ellipse(GeometryEntity):
"""An elliptical GeometryEntity.
Parameters
==========
center : Point, optional
Default value is Point(0, 0)
hradius : number or SymPy expression, optional
vradius : number or SymPy expression, optional
eccentricity : number or SymPy expression, optional
Two of `hradius`, `vradius` and `eccentricity` must be supplied to
create an Ellipse. The third is derived from the two supplied.
Attributes
==========
center
hradius
vradius
area
circumference
eccentricity
periapsis
apoapsis
focus_distance
foci
Raises
======
GeometryError
When `hradius`, `vradius` and `eccentricity` are incorrectly supplied
as parameters.
TypeError
When `center` is not a Point.
See Also
========
Circle
Notes
-----
Constructed from a center and two radii, the first being the horizontal
radius (along the x-axis) and the second being the vertical radius (along
the y-axis).
    When symbolic values for hradius and vradius are used, any calculation that
refers to the foci or the major or minor axis will assume that the ellipse
has its major radius on the x-axis. If this is not true then a manual
rotation is necessary.
Examples
========
>>> from sympy import Ellipse, Point, Rational
>>> e1 = Ellipse(Point(0, 0), 5, 1)
>>> e1.hradius, e1.vradius
(5, 1)
>>> e2 = Ellipse(Point(3, 1), hradius=3, eccentricity=Rational(4, 5))
>>> e2
Ellipse(Point(3, 1), 3, 9/5)
Plotting:
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import Circle, Segment
>>> c1 = Circle(Point(0,0), 1)
>>> Plot(c1) # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
>>> p = Plot() # doctest: +SKIP
>>> p[0] = c1 # doctest: +SKIP
>>> radius = Segment(c1.center, c1.random_point())
>>> p[1] = radius # doctest: +SKIP
>>> p # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
[1]: t*cos(1.546086215036205357975518382),
t*sin(1.546086215036205357975518382), 'mode=parametric'
"""
def __new__(
cls, center=None, hradius=None, vradius=None, eccentricity=None,
**kwargs):
hradius = sympify(hradius)
vradius = sympify(vradius)
eccentricity = sympify(eccentricity)
if center is None:
center = Point(0, 0)
else:
center = Point(center)
if len(list(filter(None, (hradius, vradius, eccentricity)))) != 2:
            raise ValueError('Exactly two arguments of "hradius", '
                '"vradius", and "eccentricity" must not be None.')
if eccentricity is not None:
if hradius is None:
hradius = vradius / sqrt(1 - eccentricity**2)
elif vradius is None:
vradius = hradius * sqrt(1 - eccentricity**2)
if hradius == vradius:
return Circle(center, hradius, **kwargs)
return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs)
@property
def center(self):
"""The center of the ellipse.
Returns
=======
        center : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.center
Point(0, 0)
"""
return self.args[0]
@property
def hradius(self):
"""The horizontal radius of the ellipse.
Returns
=======
hradius : number
See Also
========
vradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.hradius
3
"""
return self.args[1]
@property
def vradius(self):
"""The vertical radius of the ellipse.
Returns
=======
vradius : number
See Also
========
hradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.vradius
1
"""
return self.args[2]
@property
def minor(self):
"""Shorter axis of the ellipse (if it can be determined) else vradius.
Returns
=======
minor : number or expression
See Also
========
hradius, vradius, major
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.minor
1
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).minor
b
>>> Ellipse(p1, b, a).minor
a
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).minor
m
"""
rv = Min(*self.args[1:3])
if rv.func is Min:
return self.vradius
return rv
@property
def major(self):
"""Longer axis of the ellipse (if it can be determined) else hradius.
Returns
=======
major : number or expression
See Also
========
hradius, vradius, minor
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.major
3
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).major
a
>>> Ellipse(p1, b, a).major
b
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).major
m + 1
"""
rv = Max(*self.args[1:3])
if rv.func is Max:
return self.hradius
return rv
@property
def area(self):
"""The area of the ellipse.
Returns
=======
area : number
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.area
3*pi
"""
return simplify(S.Pi * self.hradius * self.vradius)
@property
def circumference(self):
"""The circumference of the ellipse.
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.circumference
12*Integral(sqrt((-8*_x**2/9 + 1)/(-_x**2 + 1)), (_x, 0, 1))
"""
        if self.eccentricity == 1:
            # degenerate ellipse: a flat segment of length 2*major
            return 4*self.major
        elif self.eccentricity == 0:
            # circle
            return 2*pi*self.hradius
else:
x = C.Dummy('x', real=True)
return 4*self.major*C.Integral(
sqrt((1 - (self.eccentricity*x)**2)/(1 - x**2)), (x, 0, 1))
@property
def eccentricity(self):
"""The eccentricity of the ellipse.
Returns
=======
eccentricity : number
Examples
========
>>> from sympy import Point, Ellipse, sqrt
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, sqrt(2))
>>> e1.eccentricity
sqrt(7)/3
"""
return self.focus_distance / self.major
@property
def periapsis(self):
"""The periapsis of the ellipse.
The shortest distance between the focus and the contour.
Returns
=======
periapsis : number
See Also
========
apoapsis : Returns greatest distance between focus and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.periapsis
-2*sqrt(2) + 3
"""
return self.major * (1 - self.eccentricity)
@property
def apoapsis(self):
"""The apoapsis of the ellipse.
The greatest distance between the focus and the contour.
Returns
=======
apoapsis : number
See Also
========
periapsis : Returns shortest distance between foci and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.apoapsis
2*sqrt(2) + 3
"""
return self.major * (1 + self.eccentricity)
@property
    def focus_distance(self):
        """The focal distance of the ellipse.
The distance between the center and one focus.
Returns
=======
focus_distance : number
See Also
========
foci
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.focus_distance
2*sqrt(2)
"""
return Point.distance(self.center, self.foci[0])
@property
def foci(self):
"""The foci of the ellipse.
Notes
-----
The foci can only be calculated if the major/minor axes are known.
Raises
======
ValueError
When the major and minor axis cannot be determined.
See Also
========
sympy.geometry.point.Point
focus_distance : Returns the distance between focus and center
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.foci
(Point(-2*sqrt(2), 0), Point(2*sqrt(2), 0))
"""
c = self.center
hr, vr = self.hradius, self.vradius
if hr == vr:
return (c, c)
# calculate focus distance manually, since focus_distance calls this
# routine
fd = sqrt(self.major**2 - self.minor**2)
if hr == self.minor:
# foci on the y-axis
return (c + Point(0, -fd), c + Point(0, fd))
elif hr == self.major:
# foci on the x-axis
return (c + Point(-fd, 0), c + Point(fd, 0))
def rotate(self, angle=0, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
Note: since the general ellipse is not supported, only rotations that
are integer multiples of pi/2 are allowed.
Examples
========
>>> from sympy import Ellipse, pi
>>> Ellipse((1, 0), 2, 1).rotate(pi/2)
Ellipse(Point(0, 1), 1, 2)
>>> Ellipse((1, 0), 2, 1).rotate(pi)
Ellipse(Point(-1, 0), 2, 1)
"""
if self.hradius == self.vradius:
return self.func(*self.args)
if (angle/S.Pi).is_integer:
return super(Ellipse, self).rotate(angle, pt)
if (2*angle/S.Pi).is_integer:
return self.func(self.center.rotate(angle, pt), self.vradius, self.hradius)
        # XXX see https://github.com/sympy/sympy/issues/2815 for general ellipses
raise NotImplementedError('Only rotations of pi/2 are currently supported for Ellipse.')
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the major and minor
axes which must be scaled and they are not GeometryEntities.
Examples
========
>>> from sympy import Ellipse
>>> Ellipse((0, 0), 2, 1).scale(2, 4)
Circle(Point(0, 0), 4)
>>> Ellipse((0, 0), 2, 1).scale(2)
Ellipse(Point(0, 0), 4, 1)
"""
c = self.center
if pt:
pt = Point(pt)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
h = self.hradius
v = self.vradius
return self.func(c.scale(x, y), hradius=h*x, vradius=v*y)
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point(1, 0), -1)
>>> from sympy import Ellipse, Line, Point
>>> Ellipse(Point(3, 4), 1, 3).reflect(Line(Point(0, -4), Point(5, 0)))
Traceback (most recent call last):
...
NotImplementedError:
General Ellipse is not supported but the equation of the reflected
Ellipse is given by the zeros of: f(x, y) = (9*x/41 + 40*y/41 +
37/41)**2 + (40*x/123 - 3*y/41 - 364/123)**2 - 1
Notes
=====
        Until the general ellipse (with no axis parallel to the x-axis) is
        supported, a NotImplementedError is raised and the equation whose
        zeros define the reflected ellipse is given.
"""
from .util import _uniquely_named_symbol
if line.slope in (0, oo):
c = self.center
c = c.reflect(line)
return self.func(c, -self.hradius, self.vradius)
else:
x, y = [_uniquely_named_symbol(name, self, line) for name in 'xy']
expr = self.equation(x, y)
p = Point(x, y).reflect(line)
result = expr.subs(zip((x, y), p.args
), simultaneous=True)
raise NotImplementedError(filldedent(
'General Ellipse is not supported but the equation '
'of the reflected Ellipse is given by the zeros of: ' +
"f(%s, %s) = %s" % (str(x), str(y), str(result))))
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
-----
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Ellipse, S
>>> from sympy.abc import t
>>> e = Ellipse((0, 0), 3, 2)
>>> e.encloses_point((0, 0))
True
>>> e.encloses_point(e.arbitrary_point(t).subs(t, S.Half))
False
>>> e.encloses_point((4, 0))
False
"""
p = Point(p)
if p in self:
return False
if len(self.foci) == 2:
# if the combined distance from the foci to p (h1 + h2) is less
# than the combined distance from the foci to the minor axis
# (which is the same as the major axis length) then p is inside
# the ellipse
h1, h2 = [f.distance(p) for f in self.foci]
test = 2*self.major - (h1 + h2)
else:
test = self.radius - self.center.distance(p)
return fuzzy_bool(test.is_positive)
@doctest_depends_on(modules=('pyglet',))
def tangent_lines(self, p):
"""Tangent lines between `p` and the ellipse.
If `p` is on the ellipse, returns the tangent line through point `p`.
Otherwise, returns the tangent line(s) from `p` to the ellipse, or
None if no tangent line is possible (e.g., `p` inside ellipse).
Parameters
==========
p : Point
Returns
=======
tangent_lines : list with 1 or 2 Lines
Raises
======
NotImplementedError
Can only find tangent lines for a point, `p`, on the ellipse.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Line
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.tangent_lines(Point(3, 0))
[Line(Point(3, 0), Point(3, -12))]
>>> # This will plot an ellipse together with a tangent line.
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import Point, Ellipse
>>> e = Ellipse(Point(0,0), 3, 2)
>>> t = e.tangent_lines(e.random_point())
>>> p = Plot()
>>> p[0] = e # doctest: +SKIP
>>> p[1] = t # doctest: +SKIP
"""
p = Point(p)
if self.encloses_point(p):
return []
if p in self:
delta = self.center - p
rise = (self.vradius ** 2)*delta.x
run = -(self.hradius ** 2)*delta.y
p2 = Point(simplify(p.x + run),
simplify(p.y + rise))
return [Line(p, p2)]
else:
if len(self.foci) == 2:
f1, f2 = self.foci
maj = self.hradius
test = (2*maj -
Point.distance(f1, p) -
Point.distance(f2, p))
else:
test = self.radius - Point.distance(self.center, p)
if test.is_number and test.is_positive:
return []
# else p is outside the ellipse or we can't tell. In case of the
# latter, the solutions returned will only be valid if
# the point is not inside the ellipse; if it is, nan will result.
x, y = Dummy('x'), Dummy('y')
eq = self.equation(x, y)
dydx = idiff(eq, y, x)
slope = Line(p, Point(x, y)).slope
tangent_points = solve([slope - dydx, eq], [x, y])
# handle horizontal and vertical tangent lines
if len(tangent_points) == 1:
                assert tangent_points[0][0] == p.x or tangent_points[0][1] == p.y
return [Line(p, p + Point(1, 0)), Line(p, p + Point(0, 1))]
# others
return [Line(p, tangent_points[0]), Line(p, tangent_points[1])]
def is_tangent(self, o):
"""Is `o` tangent to the ellipse?
Parameters
==========
o : GeometryEntity
An Ellipse, LinearEntity or Polygon
Raises
======
NotImplementedError
When the wrong type of argument is supplied.
Returns
=======
is_tangent: boolean
True if o is tangent to the ellipse, False otherwise.
See Also
========
tangent_lines
Examples
========
>>> from sympy import Point, Ellipse, Line
>>> p0, p1, p2 = Point(0, 0), Point(3, 0), Point(3, 3)
>>> e1 = Ellipse(p0, 3, 2)
>>> l1 = Line(p1, p2)
>>> e1.is_tangent(l1)
True
"""
inter = None
if isinstance(o, Ellipse):
inter = self.intersection(o)
if isinstance(inter, Ellipse):
return False
return (inter is not None and len(inter) == 1
and isinstance(inter[0], Point))
elif isinstance(o, LinearEntity):
inter = self._do_line_intersection(o)
if inter is not None and len(inter) == 1:
return inter[0] in o
else:
return False
elif isinstance(o, Polygon):
c = 0
for seg in o.sides:
inter = self._do_line_intersection(seg)
c += len([True for point in inter if point in seg])
return c == 1
else:
raise NotImplementedError("Unknown argument type")
def normal_lines(self, p, prec=None):
"""Normal lines between `p` and the ellipse.
Parameters
==========
p : Point
Returns
=======
normal_lines : list with 1, 2 or 4 Lines
Examples
========
>>> from sympy import Line, Point, Ellipse
>>> e = Ellipse((0, 0), 2, 3)
>>> c = e.center
>>> e.normal_lines(c + Point(1, 0))
[Line(Point(0, 0), Point(1, 0))]
>>> e.normal_lines(c)
[Line(Point(0, 0), Point(0, 1)), Line(Point(0, 0), Point(1, 0))]
Off-axis points require the solution of a quartic equation. This
often leads to very large expressions that may be of little practical
use. An approximate solution of `prec` digits can be obtained by
passing in the desired value:
>>> e.normal_lines((3, 3), prec=2)
[Line(Point(-38/47, -85/31), Point(9/47, -21/17)),
Line(Point(19/13, -43/21), Point(32/13, -8/3))]
Whereas the above solution has an operation count of 12, the exact
solution has an operation count of 2020.
"""
p = Point(p)
# XXX change True to something like self.angle == 0 if the arbitrarily
# rotated ellipse is introduced.
# https://github.com/sympy/sympy/issues/2815)
if True:
rv = []
if p.x == self.center.x:
rv.append(Line(self.center, slope=oo))
if p.y == self.center.y:
rv.append(Line(self.center, slope=0))
if rv:
# at these special orientations of p either 1 or 2 normals
# exist and we are done
return rv
# find the 4 normal points and construct lines through them with
# the corresponding slope
x, y = Dummy('x', real=True), Dummy('y', real=True)
eq = self.equation(x, y)
dydx = idiff(eq, y, x)
norm = -1/dydx
slope = Line(p, (x, y)).slope
seq = slope - norm
points = []
if prec is not None:
yis = solve(seq, y)[0]
xeq = eq.subs(y, yis).as_numer_denom()[0].expand()
try:
iv = list(zip(*Poly(xeq, x).intervals()))[0]
# bisection is safest here since other methods may miss root
xsol = [S(nroot(lambdify(x, xeq), i, solver="anderson"))
for i in iv]
points = [Point(i, solve(eq.subs(x, i), y)[0]).n(prec)
for i in xsol]
except (DomainError, PolynomialError):
xvals = solve(xeq, x)
points = [Point(xis, yis.xreplace({x: xis})) for xis in xvals]
points = [pt.n(prec) if prec is not None else pt for pt in points]
slopes = [norm.subs(zip((x, y), pt.args)) for pt in points]
if prec is not None:
slopes = [i.n(prec) if i not in (-oo, oo, zoo) else i
for i in slopes]
return [Line(pt, slope=s) for pt,s in zip(points, slopes)]
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the functions.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.arbitrary_point()
Point(3*cos(t), 2*sin(t))
"""
t = _symbol(parameter)
if t.name in (f.name for f in self.free_symbols):
raise ValueError(filldedent('Symbol %s already appears in object '
'and cannot be used as a parameter.' % t.name))
return Point(self.center.x + self.hradius*C.cos(t),
self.center.y + self.vradius*C.sin(t))
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the Ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.plot_interval()
[t, -pi, pi]
"""
t = _symbol(parameter)
return [t, -S.Pi, S.Pi]
def random_point(self, seed=None):
"""A random point on the ellipse.
Returns
=======
point : Point
See Also
========
sympy.geometry.point.Point
arbitrary_point : Returns parameterized point on ellipse
Notes
-----
        A random point may not appear to be on the ellipse, i.e., `p in e` may
return False. This is because the coordinates of the point will be
floating point values, and when these values are substituted into the
equation for the ellipse the result may not be zero because of floating
point rounding error.
Examples
========
>>> from sympy import Point, Ellipse, Segment
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.random_point() # gives some random point
Point(...)
>>> p1 = e1.random_point(seed=0); p1.n(2)
Point(2.1, 1.4)
        The random_point method ensures that the point will test as being
in the ellipse:
>>> p1 in e1
True
Notes
=====
        An arbitrary_point with a random value of t substituted into it may
        not test as being on the ellipse because the expression that tests whether
        a point is on the ellipse does not simplify to zero and does not evaluate
        exactly to zero:
>>> from sympy.abc import t
>>> e1.arbitrary_point(t)
Point(3*cos(t), 2*sin(t))
>>> p2 = _.subs(t, 0.1)
>>> p2 in e1
False
        Note that the random_point routine does not take this approach: a value
        for cos(t) and sin(t) (not t) is substituted into the arbitrary point.
There is a small chance that this will give a point that will not
test as being in the ellipse, so the process is repeated (up to 10
times) until a valid point is obtained.
"""
from sympy import sin, cos, Rational
t = _symbol('t')
x, y = self.arbitrary_point(t).args
# get a random value in [-1, 1) corresponding to cos(t)
# and confirm that it will test as being in the ellipse
if seed is not None:
rng = random.Random(seed)
else:
rng = random
for i in range(10): # should be enough?
# simplify this now or else the Float will turn s into a Float
c = 2*Rational(rng.random()) - 1
s = sqrt(1 - c**2)
p1 = Point(x.subs(cos(t), c), y.subs(sin(t), s))
if p1 in self:
return p1
raise GeometryError(
'Having problems generating a point in the ellipse.')
def equation(self, x='x', y='y'):
"""The equation of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
See Also
========
arbitrary_point : Returns parameterized point on ellipse
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.equation()
y**2/4 + (x/3 - 1/3)**2 - 1
"""
x = _symbol(x)
y = _symbol(y)
t1 = ((x - self.center.x) / self.hradius)**2
t2 = ((y - self.center.y) / self.vradius)**2
return t1 + t2 - 1
def _do_line_intersection(self, o):
"""
Find the intersection of a LinearEntity and the ellipse.
All LinearEntities are treated as a line and filtered at
the end to see that they lie in o.
"""
hr_sq = self.hradius ** 2
vr_sq = self.vradius ** 2
lp = o.points
ldir = lp[1] - lp[0]
diff = lp[0] - self.center
mdir = Point(ldir.x/hr_sq, ldir.y/vr_sq)
mdiff = Point(diff.x/hr_sq, diff.y/vr_sq)
a = ldir.dot(mdir)
b = ldir.dot(mdiff)
c = diff.dot(mdiff) - 1
det = simplify(b*b - a*c)
result = []
if det == 0:
t = -b / a
result.append(lp[0] + (lp[1] - lp[0]) * t)
# Definite and potential symbolic intersections are allowed.
elif (det > 0) != False:
root = sqrt(det)
t_a = (-b - root) / a
t_b = (-b + root) / a
result.append( lp[0] + (lp[1] - lp[0]) * t_a )
result.append( lp[0] + (lp[1] - lp[0]) * t_b )
return [r for r in result if r in o]
def _do_ellipse_intersection(self, o):
"""The intersection of an ellipse with another ellipse or a circle.
Private helper method for `intersection`.
"""
x = Dummy('x', real=True)
y = Dummy('y', real=True)
seq = self.equation(x, y)
oeq = o.equation(x, y)
result = solve([seq, oeq], [x, y])
return [Point(*r) for r in list(uniq(result))]
def intersection(self, o):
"""The intersection of this ellipse and another geometrical entity
`o`.
Parameters
==========
o : GeometryEntity
Returns
=======
intersection : list of GeometryEntity objects
Notes
-----
Currently supports intersections with Point, Line, Segment, Ray,
Circle and Ellipse types.
See Also
========
sympy.geometry.entity.GeometryEntity
Examples
========
>>> from sympy import Ellipse, Point, Line, sqrt
>>> e = Ellipse(Point(0, 0), 5, 7)
>>> e.intersection(Point(0, 0))
[]
>>> e.intersection(Point(5, 0))
[Point(5, 0)]
>>> e.intersection(Line(Point(0,0), Point(0, 1)))
[Point(0, -7), Point(0, 7)]
>>> e.intersection(Line(Point(5,0), Point(5, 1)))
[Point(5, 0)]
>>> e.intersection(Line(Point(6,0), Point(6, 1)))
[]
>>> e = Ellipse(Point(-1, 0), 4, 3)
>>> e.intersection(Ellipse(Point(1, 0), 4, 3))
[Point(0, -3*sqrt(15)/4), Point(0, 3*sqrt(15)/4)]
>>> e.intersection(Ellipse(Point(5, 0), 4, 3))
[Point(2, -3*sqrt(7)/4), Point(2, 3*sqrt(7)/4)]
>>> e.intersection(Ellipse(Point(100500, 0), 4, 3))
[]
>>> e.intersection(Ellipse(Point(0, 0), 3, 4))
[Point(-363/175, -48*sqrt(111)/175), Point(-363/175, 48*sqrt(111)/175), Point(3, 0)]
>>> e.intersection(Ellipse(Point(-1, 0), 3, 4))
[Point(-17/5, -12/5), Point(-17/5, 12/5), Point(7/5, -12/5), Point(7/5, 12/5)]
"""
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
elif isinstance(o, LinearEntity):
# LinearEntity may be a ray/segment, so check the points
# of intersection for coincidence first
return self._do_line_intersection(o)
elif isinstance(o, Circle):
return self._do_ellipse_intersection(o)
elif isinstance(o, Ellipse):
if o == self:
return self
else:
return self._do_ellipse_intersection(o)
return o.intersection(self)
def evolute(self, x='x', y='y'):
"""The equation of evolute of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.evolute()
2**(2/3)*y**(2/3) + (3*x - 3)**(2/3) - 5**(2/3)
"""
if len(self.args) != 3:
raise NotImplementedError('Evolute of arbitrary Ellipse is not supported.')
x = _symbol(x)
y = _symbol(y)
t1 = (self.hradius*(x - self.center.x))**Rational(2, 3)
t2 = (self.vradius*(y - self.center.y))**Rational(2, 3)
return t1 + t2 - (self.hradius**2 - self.vradius**2)**Rational(2, 3)
def __eq__(self, o):
"""Is the other GeometryEntity the same as this ellipse?"""
return isinstance(o, GeometryEntity) and (self.center == o.center and
self.hradius == o.hradius and
self.vradius == o.vradius)
def __hash__(self):
return super(Ellipse, self).__hash__()
def __contains__(self, o):
if isinstance(o, Point):
x = C.Dummy('x', real=True)
y = C.Dummy('y', real=True)
res = self.equation(x, y).subs({x: o.x, y: o.y})
return trigsimp(simplify(res)) is S.Zero
elif isinstance(o, Ellipse):
return self == o
return False
class Circle(Ellipse):
"""A circle in space.
Constructed simply from a center and a radius, or from three
non-collinear points.
Parameters
==========
center : Point
radius : number or sympy expression
points : sequence of three Points
Attributes
==========
radius (synonymous with hradius, vradius, major and minor)
circumference
equation
Raises
======
GeometryError
When trying to construct circle from three collinear points.
When trying to construct circle from incorrect parameters.
See Also
========
Ellipse, sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Circle
>>> # a circle constructed from a center and radius
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.hradius, c1.vradius, c1.radius
(5, 5, 5)
>>> # a circle constructed from three points
>>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0))
>>> c2.hradius, c2.vradius, c2.radius, c2.center
(sqrt(2)/2, sqrt(2)/2, sqrt(2)/2, Point(1/2, 1/2))
"""
def __new__(cls, *args, **kwargs):
c, r = None, None
if len(args) == 3:
args = [Point(a) for a in args]
if Point.is_collinear(*args):
raise GeometryError(
"Cannot construct a circle from three collinear points")
from .polygon import Triangle
t = Triangle(*args)
c = t.circumcenter
r = t.circumradius
elif len(args) == 2:
# Assume (center, radius) pair
c = Point(args[0])
r = sympify(args[1])
if not (c is None or r is None):
return GeometryEntity.__new__(cls, c, r, **kwargs)
raise GeometryError("Circle.__new__ received unknown arguments")
@property
def radius(self):
"""The radius of the circle.
Returns
=======
radius : number or sympy expression
See Also
========
Ellipse.major, Ellipse.minor, Ellipse.hradius, Ellipse.vradius
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.radius
6
"""
return self.args[1]
@property
def vradius(self):
"""
This Ellipse property is an alias for the Circle's radius.
Whereas hradius, major and minor can use Ellipse's conventions,
the vradius does not exist for a circle. It is always a positive
value in order that the Circle, like Polygons, will have an
area that can be positive or negative as determined by the sign
of the hradius.
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.vradius
6
"""
return abs(self.radius)
@property
def circumference(self):
"""The circumference of the circle.
Returns
=======
circumference : number or SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.circumference
12*pi
"""
return 2 * S.Pi * self.radius
def equation(self, x='x', y='y'):
"""The equation of the circle.
Parameters
==========
x : str or Symbol, optional
Default value is 'x'.
y : str or Symbol, optional
Default value is 'y'.
Returns
=======
equation : SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.equation()
x**2 + y**2 - 25
"""
x = _symbol(x)
y = _symbol(y)
t1 = (x - self.center.x)**2
t2 = (y - self.center.y)**2
return t1 + t2 - self.major**2
def intersection(self, o):
"""The intersection of this circle with another geometrical entity.
Parameters
==========
o : GeometryEntity
Returns
=======
intersection : list of GeometryEntities
Examples
========
>>> from sympy import Point, Circle, Line, Ray
>>> p1, p2, p3 = Point(0, 0), Point(5, 5), Point(6, 0)
>>> p4 = Point(5, 0)
>>> c1 = Circle(p1, 5)
>>> c1.intersection(p2)
[]
>>> c1.intersection(p4)
[Point(5, 0)]
>>> c1.intersection(Ray(p1, p2))
[Point(5*sqrt(2)/2, 5*sqrt(2)/2)]
>>> c1.intersection(Line(p2, p3))
[]
"""
if isinstance(o, Circle):
if o.center == self.center:
if o.radius == self.radius:
return o
return []
dx, dy = (o.center - self.center).args
d = sqrt(simplify(dy**2 + dx**2))
R = o.radius + self.radius
if d > R or d < abs(self.radius - o.radius):
return []
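# Standard two-circle construction: a is the distance from self.center
# to the chord of intersection, measured along the line of centers;
# (x2, y2) is the foot of that chord on the center line, h is half the
# chord length, and (rx, ry) offsets the foot perpendicularly to give
# the two intersection points.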
a = simplify((self.radius**2 - o.radius**2 + d**2) / (2*d))
x2 = self.center.x + (dx * a/d)
y2 = self.center.y + (dy * a/d)
h = sqrt(simplify(self.radius**2 - a**2))
rx = -dy * (h/d)
ry = dx * (h/d)
xi_1 = simplify(x2 + rx)
xi_2 = simplify(x2 - rx)
yi_1 = simplify(y2 + ry)
yi_2 = simplify(y2 - ry)
ret = [Point(xi_1, yi_1)]
if xi_1 != xi_2 or yi_1 != yi_2:
ret.append(Point(xi_2, yi_2))
return ret
return Ellipse.intersection(self, o)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle
>>> Circle((0, 0), 1).scale(2, 2)
Circle(Point(0, 0), 2)
>>> Circle((0, 0), 1).scale(2, 4)
Ellipse(Point(0, 0), 2, 4)
"""
c = self.center
if pt:
pt = Point(pt)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
c = c.scale(x, y)
x, y = [abs(i) for i in (x, y)]
if x == y:
return self.func(c, x*self.radius)
h = v = self.radius
return Ellipse(c, hradius=h*x, vradius=v*y)
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point(1, 0), -1)
"""
c = self.center
c = c.reflect(line)
return self.func(c, -self.radius)
from .polygon import Polygon
|
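# A minimal usage sketch of the intersection API defined above, run against
# the public sympy package rather than this module directly; the numeric
# values are arbitrary examples.
from sympy import Circle, Ellipse, Line, Point

circle = Circle(Point(0, 0), 5)
line = Line(Point(-10, 3), Point(10, 3))              # horizontal chord at y = 3
print(circle.intersection(line))                      # two points, x = -4 and x = 4
print(circle.intersection(Circle(Point(8, 0), 5)))    # two circles crossing at x = 4
print(Ellipse(Point(0, 0), 3, 2).equation())          # x**2/9 + y**2/4 - 1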
|
#!/usr/bin/env python
"""
@package ion.agents.port.logger_process
@file ion/agents/port/logger_process.py
@author Edward Hunter
@brief Daemon processes providing hardware specific device connections
and logging.
"""
__author__ = 'Edward Hunter'
import socket
import threading
import time
import datetime
import atexit
import errno
from subprocess import Popen
from subprocess import PIPE
import logging
import os
import uuid
from mi.core.daemon_process import DaemonProcess
from mi.core.exceptions import InstrumentConnectionException
mi_logger = logging.getLogger('mi_logger')
"""
import ion.agents.instrument.mi_logger
import ion.agents.port.logger_process as lp
l = lp.EthernetDeviceLogger('137.110.112.119', 4001, 8888)
c = lp.LoggerClient('localhost', 8888, '\r\n')
"""
class BaseLoggerProcess(DaemonProcess):
"""
Base class for device loggers. Device loggers are communication
management processes that are launched by, but have independent lifecycles
from drivers, allowing them to persist even when drivers are shut down
or exit abnormally. Inherits from DaemonProcess and provides
a run loop that forwards traffic between driver and device hardware,
and read/write logic for driver and sniffer client objects.
Derived subclasses provide read/write logic for TCP/IP, serial or other
device hardware.
"""
@staticmethod
def launch_logger(cmd_str):
"""
Launch a logger in a separate python environment.
@param cmd_str the command string for python.
@retval Popen object for the new process.
"""
spawnargs = ['bin/python', '-c', cmd_str]
return Popen(spawnargs, close_fds=True)
def __init__(self, pidfname, logfname, statusfname, portfname, workdir,
delim, ppid):
"""
Base logger process constructor.
@param pidfname Process id file name.
@param logfname Log file name.
@param statusfname Status file name.
@param portfname Port file name.
@param workdir The work directory.
@param delim 2-element delimiter to indicate traffic from the driver
in the logfile.
@param ppid Parent process ID, used to self destruct when parents
die in test cases.
"""
DaemonProcess.__init__(self, pidfname, logfname, workdir)
self.server_port = None
self.driver_server_sock = None
self.driver_sock = None
self.driver_addr = None
self.delim = delim
self.statusfname = workdir + statusfname
self.ppid = ppid
self.last_parent_check = None
self.portfname = workdir + portfname
def _init_driver_comms(self):
"""
Initialize driver comms. Create, bind and listen on the driver
connection server port. Make server socket nonblocking so
accepts in the run loop return immediately.
Log success and errors to status file. Handles address in use and
unspecified socket errors.
@retval True on success, False otherwise.
"""
if not self.driver_server_sock:
try:
self.driver_server_sock = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self.driver_server_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.driver_server_sock.bind(('',0))
sock_name = self.driver_server_sock.getsockname()
self.server_port = sock_name[1]
file(self.portfname,'w+').write(str(self.server_port)+'\n')
self.driver_server_sock.listen(1)
self.driver_server_sock.setblocking(0)
self.statusfile.write('_init_driver_comms: Listening for driver at: %s.\n' % str(sock_name))
self.statusfile.flush()
return True
except socket.error as e:
# [Errno 48] Address already in use.
# Report and fail.
if e.errno == errno.EADDRINUSE:
self.statusfile.write('_init_driver_comms: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
return False
else:
# TBD. Report and fail.
self.statusfile.write('_init_driver_comms: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
return False
def _accept_driver_comms(self):
"""
Accept a driver connection request from nonblocking driver server
socket. If nothing available, proceed. If a connection is accepted,
log with status file. Handles resource unavailable and unspecified
socket errors.
"""
sock = None
addr = None
try:
sock, host_port_tuple = self.driver_server_sock.accept()
except socket.error as e:
# [Errno 35] Resource temporarily unavailable.
if e.errno == errno.EAGAIN:
# Waiting for a driver connection, proceed out of function.
pass
else:
# TBD. Report and proceed.
self.statusfile.write('_accept_driver_comms: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
if sock:
self.driver_sock = sock
self.driver_sock.setblocking(0)
self.driver_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.driver_addr = host_port_tuple
self.statusfile.write('_accept_driver_comms: driver connected at %s:%i.\n' % self.driver_addr)
self.statusfile.flush()
def _close_driver_comms(self):
"""
Close driver communications. Close driver and driver server sockets
if they exist. Log with status file.
"""
if self.driver_sock:
self.driver_sock.close()
self.driver_sock = None
self.driver_addr = None
self.statusfile.write('_close_driver_comms: closed driver connection.\n')
self.statusfile.flush()
if self.driver_server_sock:
self.driver_server_sock.close()
self.driver_server_sock = None
self.statusfile.write('_close_driver_comms: closed driver server.\n')
self.statusfile.flush()
def _init_device_comms(self):
"""
Initialize device communications. Overridden by hardware specific
subclasses.
"""
pass
def _close_device_comms(self):
"""
Close communications with the device. Overridden in hardware specific
subclasses.
"""
pass
def _device_connected(self):
"""
Device connected status function. Overridden in hardware specific
subclasses.
"""
return False
def _check_parent(self):
"""
Check if the original parent is still alive, and fire the shutdown
process if not detected. Used when run in the testing framework
to ensure the process and pidfile go away if the test ends abruptly.
"""
if self.ppid:
cur_time = time.time()
if not self.last_parent_check or (cur_time - self.last_parent_check > 1):
self.last_parent_check = cur_time
try:
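# Signal 0 delivers nothing; it only asks the OS whether the pid
# still exists (OSError is raised if the parent has gone away).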
os.kill(self.ppid, 0)
except OSError:
self.statusfile.write('_check_parent: parent process not detected, shutting down.\n')
self.statusfile.flush()
self._cleanup()
def read_driver(self):
"""
Read data from driver, if available. Log errors to status file.
Handles resource unavailable, connection reset by peer, broken pipe
and unspecified socket errors.
@retval The string of data read from the driver or None.
"""
data = None
if self.driver_sock:
try:
data = self.driver_sock.recv(4096)
except socket.error as e:
# [Errno 35] Resource temporarily unavailable.
if e.errno == errno.EAGAIN:
# Nothing to read, proceed out of the function.
pass
# [Errno 54] Connection reset by peer.
elif e.errno == errno.ECONNRESET:
# The client side has disconnected, report and close socket.
self.statusfile.write('read_driver: raised errno %i, %s.\n'
% (e.errno, str(e)))
self.statusfile.flush()
self.driver_sock.close()
self.driver_sock = None
self.driver_addr = None
# [Errno 32] Broken pipe.
elif e.errno == errno.EPIPE:
# Broken pipe, report and close socket.
self.statusfile.write('read_driver: raised errno %i, %s.\n'
% (e.errno, str(e)))
self.statusfile.flush()
self.driver_sock.close()
self.driver_sock = None
self.driver_addr = None
# Unspecified socket error, report and close socket.
else:
# TBD. Report and close socket.
self.statusfile.write('read_driver: raised errno %i, %s.\n'
% (e.errno, str(e)))
self.statusfile.flush()
self.driver_sock.close()
self.driver_sock = None
self.driver_addr = None
return data
def write_driver(self, data):
"""
Write data to driver, retrying until all has been sent. Log errors
to status file. Handles resource unavailable, connection reset by peer,
broken pipe and unspecified socket errors.
@param data The data string to write to the driver.
"""
if self.driver_sock:
sent = 0
while len(data)>0:
try:
sent = self.driver_sock.send(data)
data = data[sent:]
except socket.error as e:
# [Errno 35] Resource temporarily unavailable.
if e.errno == errno.EAGAIN:
# Occurs when the network write buffer is full.
# Sleep a short period of time and retry.
time.sleep(.1)
# [Errno 54] Connection reset by peer.
elif e.errno == errno.ECONNRESET:
# The client side has disconnected, report and close socket.
self.statusfile.write('write_driver: raised errno %i, %s.\n'
% (e.errno, str(e)))
self.statusfile.flush()
self.driver_sock.close()
self.driver_sock = None
self.driver_addr = None
break
# [Errno 32] Broken pipe.
elif e.errno == errno.EPIPE:
# Broken pipe, report and close socket.
self.statusfile.write('write_driver: raised errno %i, %s.\n'
% (e.errno, str(e)))
self.statusfile.flush()
self.driver_sock.close()
self.driver_sock = None
self.driver_addr = None
break
# Unspecified socket error, report and close socket.
else:
# TBD. Report and close socket.
self.statusfile.write('write_driver: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
self.driver_sock.close()
self.driver_sock = None
self.driver_addr = None
break
def read_device(self):
"""
Read from device, if available. Overridden by hardware
specific subclass.
@retval The data string read from the device, or None.
"""
pass
def write_device(self, data):
"""
Write to device, retrying until all has been sent. Overridden
by hardware specific subclass.
@param data The data string to write to the device.
"""
pass
def _cleanup(self):
"""
Cleanup function prior to logger exit. Close comms, status file and
call DaemonProcess cleanup. This is called by the DaemonProcess
SIGTERM handler if termination occurs due to signal, or by
atexit handler if the run loop concludes normally.
"""
self._close_device_comms()
self._close_driver_comms()
if os.path.exists(self.portfname):
os.remove(self.portfname)
if self.statusfile:
self.statusfile.write('_cleanup: logger stopping.\n')
self.statusfile.flush()
self.statusfile.close()
self.statusfile = None
DaemonProcess._cleanup(self)
def get_port(self):
"""
Read the logger port file and return the port number to connect to.
"""
try:
pf = file(self.portfname, 'r')
port = int(pf.read().strip())
pf.close()
except IOError:
port = None
return port
def _run(self):
"""
Logger run loop. Create and initialize status file, initialize
device and driver comms and loop while device connected. Loop
accepts driver connections, reads driver, writes to device and
sniffer, reads device, writes to driver and sniffer and repeats.
Logger is stopped by calling DaemonProcess.stop() resulting in
SIGTERM signal sent to the logger, or if the device hardware connection
is lost, whereby the run loop and logger process will terminate.
"""
atexit.register(self._cleanup)
self.statusfile = file(self.statusfname, 'w+')
self.statusfile.write('_run: logger starting.\n')
self.statusfile.flush()
if not self._init_device_comms():
self.statusfile.write('_run: could not connect to device.\n')
self.statusfile.flush()
self._cleanup()
return
if not self._init_driver_comms():
self.statusfile.write('_run: could not listen for drivers.\n')
self.statusfile.flush()
self._cleanup()
return
#last_time = time.time()
while self._device_connected():
self._accept_driver_comms()
driver_data = self.read_driver()
if driver_data:
self.write_device(driver_data)
self.logfile.write(self.delim[0]+repr(driver_data)+self.delim[1])
self.logfile.write('\n')
self.logfile.flush()
device_data = self.read_device()
if device_data:
self.write_driver(device_data)
self.logfile.write(repr(device_data))
self.logfile.write('\n')
self.logfile.flush()
self._check_parent()
if not driver_data and not device_data:
time.sleep(.1)
class EthernetDeviceLogger(BaseLoggerProcess):
"""
A device logger process specialized to read/write to TCP/IP devices.
Provides functionality opening, closing, reading, writing and checking
connection status of device.
"""
@classmethod
def launch_process(cls, device_host, device_port, workdir='/tmp/',
delim=None, ppid=None):
"""
Class method to be used in place of a constructor to launch a logger in
a fully separate python interpreter process. Builds command line for
EthernetDeviceLogger and calls base class static method.
@param device_host Internet address of the device.
@param device_port Port of the device.
@param workdir The work directory, by default '/tmp/'.
@param delim 2-element delimiter to indicate traffic from the driver
in the logfile. If not given or if None, ['<<', '>>'] is used.
@param ppid Parent process ID, used to self destruct when parents
die in test cases.
@retval An EthernetDeviceLogger object to control the remote process.
"""
delim = delim or ['<<', '>>']
start_time = datetime.datetime.now()
dt_string = '%i_%i_%i_%i_%i_%i' % \
(start_time.year, start_time.month,
start_time.day, start_time.hour, start_time.minute,
start_time.second)
tag = str(uuid.uuid4())
pidfname = '%s_%i_%s.pid.txt' % (device_host, device_port, tag)
portfname = '%s_%i_%s.port.txt' % (device_host, device_port, tag)
logfname = '%s_%i_%s__%s.log.txt' % (device_host, device_port, tag, dt_string)
statusfname = '%s_%i_%s__%s.status.txt' % (device_host, device_port, tag, dt_string)
cmd_str = 'from %s import %s; l = %s("%s", %i, "%s", "%s", "%s", "%s", "%s", %s, %s); l.start()' \
% (__name__, cls.__name__, cls.__name__, device_host, device_port, pidfname,
logfname, statusfname, portfname, workdir, str(delim), str(ppid))
BaseLoggerProcess.launch_logger(cmd_str)
return EthernetDeviceLogger(device_host, device_port, pidfname, logfname,
statusfname, portfname, workdir, delim, ppid)
def __init__(self, device_host, device_port, pidfname, logfname,
statusfname, portfname, workdir, delim, ppid):
"""
Ethernet device logger constructor. Initialize ethernet specific
members and call base class constructor.
@param device_host Internet address of the device.
@param device_port Port of the device.
@param pidfname Process id file name.
@param logfname Log file name.
@param statusfname Status file name.
@param portfname Port file name.
@param workdir The work directory.
@param delim 2-element delimiter to indicate traffic from the driver
in the logfile.
@param ppid Parent process ID, used to self destruct when parents
die in test cases.
"""
self.device_host = device_host
self.device_port = device_port
self.device_sock = None
BaseLoggerProcess.__init__(self, pidfname, logfname, statusfname, portfname,
workdir, delim, ppid)
def _init_device_comms(self):
"""
Initialize ethernet device comms. Attempt to connect to an IP
device with a timeout, setting the socket to nonblocking on success.
Log success or errors to the status file.
@retval True on success, False otherwise.
"""
self.device_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.device_sock.settimeout(10)
try:
self.device_sock.connect((self.device_host, self.device_port))
self.device_sock.setblocking(0)
self.device_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except Exception as e:
# This could be a timeout.
self.statusfile.write('_init_device_comms: raised %s.\n' % str(e))
self.statusfile.flush()
self.device_sock = None
return False
else:
sock_name = self.device_sock.getsockname()
self.statusfile.write('_init_device_comms: device connected at: %s\n' % str(sock_name))
self.statusfile.flush()
return True
def _close_device_comms(self):
"""
Close ethernet device comms and log with status file.
"""
if self.device_sock:
#-self.device_sock.shutdown(socket.SHUT_RDWR)
self.device_sock.close()
self.device_sock = None
time.sleep(1)
self.statusfile.write('_close_device_comms: device connection closed.\n')
self.statusfile.flush()
def _device_connected(self):
"""
Determine if device still connected.
@retval True on success, False otherwise.
"""
return self.device_sock is not None
def read_device(self):
"""
Read from an ethernet device, if available. Log errors if they occur
(except resource temporarily unavailable). Handles resource
temporarily unavailable, connection reset by peer, broken pipe,
and unspecified socket errors.
@retval A data string read from the device, or None.
"""
data = None
if self.device_sock:
try:
data = self.device_sock.recv(4096)
except socket.error as e:
# [Errno 35] Resource temporarily unavailable.
if e.errno == errno.EAGAIN:
# No data to read from device.
# Proceed out of the read function.
pass
# [Errno 54] Connection reset by peer.
elif e.errno == errno.ECONNRESET:
# TBD. Report and close socket (end logger).
self.statusfile.write('read_device: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
self.device_sock.close()
self.device_sock = None
# [Errno 32] Broken pipe.
elif e.errno == errno.EPIPE:
# TBD. Report and close socket (end logger).
self.statusfile.write('read_device: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
self.device_sock.close()
self.device_sock = None
# Unspecified socket error.
else:
# TBD. Report and close socket (end logger).
self.statusfile.write('read_device: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
self.device_sock.close()
self.device_sock = None
return data
def write_device(self, data):
"""
Write to an ethernet device, retrying until all data is sent. Log errors
if they occur (except resource temporarily unavailable). Handles resource
temporarily unavailable, connection reset by peer, broken pipe,
and unspecified socket errors.
@param data The data string to write to the device.
"""
if self.device_sock:
sent = 0
while len(data)>0:
try:
sent = self.device_sock.send(data)
data = data[sent:]
except socket.error as e:
# [Errno 35] Resource temporarily unavailable.
if e.errno == errno.EAGAIN:
# Occurs when the network write buffer is full.
# Sleep a short period of time and retry.
time.sleep(.1)
# [Errno 54] Connection reset by peer.
elif e.errno == errno.ECONNRESET:
# TBD. Report and close socket (end logger).
self.statusfile.write('write_device: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
self.device_sock.close()
self.device_sock = None
break
# [Errno 32] Broken pipe.
elif e.errno == errno.EPIPE:
# TBD. Report and close socket (end logger).
self.statusfile.write('write_device: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
self.device_sock.close()
self.device_sock = None
break
# Unspecified socket error, report and close socket.
else:
# TBD. Report and close socket (end logger).
self.statusfile.write('write_device: raised errno %i, %s.\n' % (e.errno, str(e)))
self.statusfile.flush()
self.device_sock.close()
self.device_sock = None
break
class SerialDeviceLogger(BaseLoggerProcess):
"""
A device logger process specialized to read/write to serial devices.
Provides functionality opening, closing, reading, writing and checking
connection status of device.
"""
def __init__(self):
"""
Serial logger constructor. Set serial specific members and call
base class constructor.
"""
pass
class LoggerClient(object):
"""
A logger process client class to test and demonstrate the correct use
of device logger processes. The client object starts and stops
comms with the logger. Data is sent to the logger with the send function,
and data is retrieved from the logger with a listener thread.
"""
def __init__(self, host, port, delim=None):
"""
Logger client constructor.
"""
self.host = host
self.port = port
self.sock = None
self.listener_thread = None
self.stop_event = None
self.delim = delim
def init_comms(self, callback=None):
"""
Initialize client comms with the logger process and start a
listener thread.
"""
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# This can be thrown here.
# error: [Errno 61] Connection refused
self.sock.connect((self.host, self.port))
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sock.setblocking(0)
self.listener_thread = Listener(self.sock, self.delim, callback)
self.listener_thread.start()
mi_logger.info('LoggerClient.init_comms(): connected to port agent at %s:%i.'
% (self.host, self.port))
except:
raise InstrumentConnectionException('Failed to connect to port agent at %s:%i.'
% (self.host, self.port))
def stop_comms(self):
"""
Stop the listener thread and close client comms with the device
logger. This is called by the done function.
"""
mi_logger.info('Logger shutting down comms.')
self.listener_thread.done()
self.listener_thread.join()
#-self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.sock = None
mi_logger.info('Logger client comms stopped.')
def done(self):
"""
Synonym for stop_comms.
"""
self.stop_comms()
def send(self, data):
"""
Send data to the device logger, retrying until all is sent.
"""
if self.sock:
while len(data)>0:
try:
sent = self.sock.send(data)
gone = data[:sent]
data = data[sent:]
except socket.error:
time.sleep(.1)
class Listener(threading.Thread):
"""
A listener thread to monitor the client socket data incoming from
the logger process. A similar construct will be used in drivers
to catch and act upon the incoming data, so the pattern is presented here.
"""
def __init__(self, sock, delim, callback=None):
"""
Listener thread constructor.
@param sock The socket to listen on.
@param delim The line delimiter to split incoming lines on, used in
debugging when no callback is supplied.
@param callback The callback on data arrival.
"""
threading.Thread.__init__(self)
self.sock = sock
self._done = False
self.linebuf = ''
self.delim = delim
if callback:
def fn_callback(data):
callback(data)
self.callback = fn_callback
else:
self.callback = None
def done(self):
"""
Signal to the listener thread to end its processing loop and
conclude.
"""
self._done = True
def run(self):
"""
Listener thread processing loop. Read incoming data when
available and report it to the logger.
"""
mi_logger.info('Logger client listener started.')
while not self._done:
try:
data = self.sock.recv(4096)
if self.callback:
self.callback(data)
else:
if not self.delim:
print 'from device:%s' % repr(data)
else:
self.linebuf += data
lines = str.split(self.linebuf, self.delim)
self.linebuf = lines[-1]
lines = lines[:-1]
for item in lines:
print 'from device:%s' % item
except socket.error:
time.sleep(.1)
mi_logger.info('Logger client done listening.')
|
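# A rough end-to-end sketch of how the classes above are meant to be wired
# together. Host, port and the payload are placeholder values, and the short
# sleep before reading the port file is an assumption of this sketch, not a
# guarantee made by the API (the logger daemon writes the file asynchronously).
import time

def demo_logger_roundtrip(device_host='10.0.0.5', device_port=4001):
    # Launch the logger in its own interpreter/daemon process.
    logger = EthernetDeviceLogger.launch_process(device_host, device_port)
    time.sleep(2)                      # give the daemon time to write its port file
    port = logger.get_port()           # ephemeral port the logger listens on
    client = LoggerClient('localhost', port, delim='\r\n')
    try:
        client.init_comms(callback=lambda data: None)
        client.send('ts\r\n')          # placeholder device command
    finally:
        client.done()                  # stop the listener thread and close comms
        logger.stop()                  # DaemonProcess.stop() sends SIGTERM to the logger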
|
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import mock
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from nova import exception
from nova import objects
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
CONF = cfg.CONF
class ImageCacheTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V ImageCache class."""
FAKE_FORMAT = 'fake_format'
FAKE_IMAGE_REF = 'fake_image_ref'
FAKE_VHD_SIZE_GB = 1
def setUp(self):
super(ImageCacheTestCase, self).setUp()
self.context = 'fake-context'
self.instance = fake_instance.fake_instance_obj(self.context)
# utilsfactory will check the host OS version via get_hostutils,
# in order to return the proper Utils Class, so it must be mocked.
patched_get_hostutils = mock.patch.object(imagecache.utilsfactory,
"get_hostutils")
patched_get_vhdutils = mock.patch.object(imagecache.utilsfactory,
"get_vhdutils")
patched_get_hostutils.start()
patched_get_vhdutils.start()
self.addCleanup(patched_get_hostutils.stop)
self.addCleanup(patched_get_vhdutils.stop)
self.imagecache = imagecache.ImageCache()
self.imagecache._pathutils = mock.MagicMock()
self.imagecache._vhdutils = mock.MagicMock()
self.tmpdir = self.useFixture(fixtures.TempDir()).path
def _test_get_root_vhd_size_gb(self, old_flavor=True):
if old_flavor:
mock_flavor = objects.Flavor(**test_flavor.fake_flavor)
self.instance.old_flavor = mock_flavor
else:
self.instance.old_flavor = None
return self.imagecache._get_root_vhd_size_gb(self.instance)
def test_get_root_vhd_size_gb_old_flavor(self):
ret_val = self._test_get_root_vhd_size_gb()
self.assertEqual(test_flavor.fake_flavor['root_gb'], ret_val)
def test_get_root_vhd_size_gb(self):
ret_val = self._test_get_root_vhd_size_gb(old_flavor=False)
self.assertEqual(self.instance.flavor.root_gb, ret_val)
@mock.patch.object(imagecache.ImageCache, '_get_root_vhd_size_gb')
def test_resize_and_cache_vhd_smaller(self, mock_get_vhd_size_gb):
self.imagecache._vhdutils.get_vhd_size.return_value = {
'VirtualSize': (self.FAKE_VHD_SIZE_GB + 1) * units.Gi
}
mock_get_vhd_size_gb.return_value = self.FAKE_VHD_SIZE_GB
mock_internal_vhd_size = (
self.imagecache._vhdutils.get_internal_vhd_size_by_file_size)
mock_internal_vhd_size.return_value = self.FAKE_VHD_SIZE_GB * units.Gi
self.assertRaises(exception.FlavorDiskSmallerThanImage,
self.imagecache._resize_and_cache_vhd,
mock.sentinel.instance,
mock.sentinel.vhd_path)
self.imagecache._vhdutils.get_vhd_size.assert_called_once_with(
mock.sentinel.vhd_path)
mock_get_vhd_size_gb.assert_called_once_with(mock.sentinel.instance)
mock_internal_vhd_size.assert_called_once_with(
mock.sentinel.vhd_path, self.FAKE_VHD_SIZE_GB * units.Gi)
def _prepare_get_cached_image(self, path_exists=False, use_cow=False,
rescue_image_id=None):
self.instance.image_ref = self.FAKE_IMAGE_REF
self.imagecache._pathutils.get_base_vhd_dir.return_value = (
self.tmpdir)
self.imagecache._pathutils.exists.return_value = path_exists
self.imagecache._vhdutils.get_vhd_format.return_value = (
constants.DISK_FORMAT_VHD)
CONF.set_override('use_cow_images', use_cow)
image_file_name = rescue_image_id or self.FAKE_IMAGE_REF
expected_path = os.path.join(self.tmpdir,
image_file_name)
expected_vhd_path = "%s.%s" % (expected_path,
constants.DISK_FORMAT_VHD.lower())
return (expected_path, expected_vhd_path)
@mock.patch.object(imagecache.images, 'fetch')
def test_get_cached_image_with_fetch(self, mock_fetch):
(expected_path,
expected_vhd_path) = self._prepare_get_cached_image(False, False)
result = self.imagecache.get_cached_image(self.context, self.instance)
self.assertEqual(expected_vhd_path, result)
mock_fetch.assert_called_once_with(self.context, self.FAKE_IMAGE_REF,
expected_path)
self.imagecache._vhdutils.get_vhd_format.assert_called_once_with(
expected_path)
self.imagecache._pathutils.rename.assert_called_once_with(
expected_path, expected_vhd_path)
@mock.patch.object(imagecache.images, 'fetch')
def test_get_cached_image_with_fetch_exception(self, mock_fetch):
(expected_path,
expected_vhd_path) = self._prepare_get_cached_image(False, False)
# path doesn't exist until fetched.
self.imagecache._pathutils.exists.side_effect = [False, False, True]
mock_fetch.side_effect = exception.InvalidImageRef(
image_href=self.FAKE_IMAGE_REF)
self.assertRaises(exception.InvalidImageRef,
self.imagecache.get_cached_image,
self.context, self.instance)
self.imagecache._pathutils.remove.assert_called_once_with(
expected_path)
@mock.patch.object(imagecache.ImageCache, '_resize_and_cache_vhd')
def test_get_cached_image_use_cow(self, mock_resize):
(expected_path,
expected_vhd_path) = self._prepare_get_cached_image(True, True)
expected_resized_vhd_path = expected_vhd_path + 'x'
mock_resize.return_value = expected_resized_vhd_path
result = self.imagecache.get_cached_image(self.context, self.instance)
self.assertEqual(expected_resized_vhd_path, result)
mock_resize.assert_called_once_with(self.instance, expected_vhd_path)
@mock.patch.object(imagecache.images, 'fetch')
def test_cache_rescue_image_bigger_than_flavor(self, mock_fetch):
fake_rescue_image_id = 'fake_rescue_image_id'
self.imagecache._vhdutils.get_vhd_info.return_value = {
'VirtualSize': (self.instance.flavor.root_gb + 1) * units.Gi}
(expected_path,
expected_vhd_path) = self._prepare_get_cached_image(
rescue_image_id=fake_rescue_image_id)
self.assertRaises(exception.ImageUnacceptable,
self.imagecache.get_cached_image,
self.context, self.instance,
fake_rescue_image_id)
mock_fetch.assert_called_once_with(self.context,
fake_rescue_image_id,
expected_path)
self.imagecache._vhdutils.get_vhd_info.assert_called_once_with(
expected_vhd_path)
def test_age_and_verify_cached_images(self):
fake_images = [mock.sentinel.FAKE_IMG1, mock.sentinel.FAKE_IMG2]
fake_used_images = [mock.sentinel.FAKE_IMG1]
self.imagecache.originals = fake_images
self.imagecache.used_images = fake_used_images
self.imagecache._update_image_timestamp = mock.Mock()
self.imagecache._remove_if_old_image = mock.Mock()
self.imagecache._age_and_verify_cached_images(
mock.sentinel.FAKE_CONTEXT,
mock.sentinel.all_instances,
mock.sentinel.tmpdir)
self.imagecache._update_image_timestamp.assert_called_once_with(
mock.sentinel.FAKE_IMG1)
self.imagecache._remove_if_old_image.assert_called_once_with(
mock.sentinel.FAKE_IMG2)
@mock.patch.object(imagecache.os, 'utime')
@mock.patch.object(imagecache.ImageCache, '_get_image_backing_files')
def test_update_image_timestamp(self, mock_get_backing_files, mock_utime):
mock_get_backing_files.return_value = [mock.sentinel.backing_file,
mock.sentinel.resized_file]
self.imagecache._update_image_timestamp(mock.sentinel.image)
mock_get_backing_files.assert_called_once_with(mock.sentinel.image)
mock_utime.assert_has_calls([
mock.call(mock.sentinel.backing_file, None),
mock.call(mock.sentinel.resized_file, None)])
def test_get_image_backing_files(self):
image = 'fake-img'
self.imagecache.unexplained_images = ['%s_42' % image,
'unexplained-img']
self.imagecache._pathutils.get_image_path.side_effect = [
mock.sentinel.base_file, mock.sentinel.resized_file]
backing_files = self.imagecache._get_image_backing_files(image)
self.assertEqual([mock.sentinel.base_file, mock.sentinel.resized_file],
backing_files)
self.imagecache._pathutils.get_image_path.assert_has_calls(
[mock.call(image), mock.call('%s_42' % image)])
@mock.patch.object(imagecache.ImageCache, '_get_image_backing_files')
def test_remove_if_old_image(self, mock_get_backing_files):
mock_get_backing_files.return_value = [mock.sentinel.backing_file,
mock.sentinel.resized_file]
self.imagecache._pathutils.get_age_of_file.return_value = 3600
self.imagecache._remove_if_old_image(mock.sentinel.image)
calls = [mock.call(mock.sentinel.backing_file),
mock.call(mock.sentinel.resized_file)]
self.imagecache._pathutils.get_age_of_file.assert_has_calls(calls)
mock_get_backing_files.assert_called_once_with(mock.sentinel.image)
def test_remove_old_image(self):
fake_img_path = os.path.join(self.tmpdir,
self.FAKE_IMAGE_REF)
self.imagecache._remove_old_image(fake_img_path)
self.imagecache._pathutils.remove.assert_called_once_with(
fake_img_path)
@mock.patch.object(imagecache.ImageCache, '_age_and_verify_cached_images')
@mock.patch.object(imagecache.ImageCache, '_list_base_images')
@mock.patch.object(imagecache.ImageCache, '_list_running_instances')
def test_update(self, mock_list_instances, mock_list_images,
mock_age_cached_images):
base_vhd_dir = self.imagecache._pathutils.get_base_vhd_dir.return_value
mock_list_instances.return_value = {
'used_images': {mock.sentinel.image: mock.sentinel.instances}}
mock_list_images.return_value = {
'originals': [mock.sentinel.original_image],
'unexplained_images': [mock.sentinel.unexplained_image]}
self.imagecache.update(mock.sentinel.context,
mock.sentinel.all_instances)
self.assertEqual([mock.sentinel.image],
list(self.imagecache.used_images))
self.assertEqual([mock.sentinel.original_image],
self.imagecache.originals)
self.assertEqual([mock.sentinel.unexplained_image],
self.imagecache.unexplained_images)
mock_list_instances.assert_called_once_with(
mock.sentinel.context, mock.sentinel.all_instances)
mock_list_images.assert_called_once_with(base_vhd_dir)
mock_age_cached_images.assert_called_once_with(
mock.sentinel.context, mock.sentinel.all_instances, base_vhd_dir)
@mock.patch.object(imagecache.os, 'listdir')
def test_list_base_images(self, mock_listdir):
original_image = uuids.fake
unexplained_image = 'just-an-image'
ignored_file = 'foo.bar'
mock_listdir.return_value = ['%s.VHD' % original_image,
'%s.vhdx' % unexplained_image,
ignored_file]
images = self.imagecache._list_base_images(mock.sentinel.base_dir)
self.assertEqual([original_image], images['originals'])
self.assertEqual([unexplained_image], images['unexplained_images'])
mock_listdir.assert_called_once_with(mock.sentinel.base_dir)
|
|
#! /usr/bin/env python
import os
import re
import sys
import time
import random
import logging
import tempfile
import subprocess
import shutil
import argparse
# params overwrite priority:
# for default:
# default_params < blackbox|whitebox_default_params < args
# for simple:
# simple_default_params < blackbox|whitebox_simple_default_params < args
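# Illustrative example of that precedence, using names from the dicts below:
# default_params sets log2_keys_per_lock to 2, whitebox_default_params
# overrides it to 10, and an explicit --log2_keys_per_lock flag on the
# command line wins over both.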
default_params = {
"acquire_snapshot_one_in": 10000,
"block_size": 16384,
"cache_size": 1048576,
"use_clock_cache": "false",
"delpercent": 5,
"destroy_db_initially": 0,
"disable_wal": 0,
"allow_concurrent_memtable_write": 0,
"iterpercent": 10,
"max_background_compactions": 20,
"max_bytes_for_level_base": 10485760,
"max_key": 100000000,
"max_write_buffer_number": 3,
"memtablerep": "prefix_hash",
"mmap_read": lambda: random.randint(0, 1),
"open_files": 500000,
"prefix_size": 7,
"prefixpercent": 5,
"progress_reports": 0,
"readpercent": 45,
"reopen": 20,
"snapshot_hold_ops": 100000,
"sync": 0,
"target_file_size_base": 2097152,
"target_file_size_multiplier": 2,
"threads": 32,
"verify_checksum": 1,
"write_buffer_size": 4 * 1024 * 1024,
"writepercent": 35,
"log2_keys_per_lock": 2,
"subcompactions": lambda: random.randint(1, 4),
"use_merge": lambda: random.randint(0, 1),
"use_full_merge_v1": lambda: random.randint(0, 1),
}
def get_dbname(test_name):
test_tmpdir = os.environ.get("TEST_TMPDIR")
if test_tmpdir is None or test_tmpdir == "":
dbname = tempfile.mkdtemp(prefix='rocksdb_crashtest_' + test_name)
else:
dbname = test_tmpdir + "/rocksdb_crashtest_" + test_name
shutil.rmtree(dbname, True)
return dbname
blackbox_default_params = {
# total time for this script to test db_stress
"duration": 6000,
# time for one db_stress instance to run
"interval": 120,
# since we will be killing anyway, use large value for ops_per_thread
"ops_per_thread": 100000000,
"set_options_one_in": 10000,
"test_batches_snapshots": 1,
}
whitebox_default_params = {
"duration": 10000,
"log2_keys_per_lock": 10,
"nooverwritepercent": 1,
"ops_per_thread": 200000,
"test_batches_snapshots": lambda: random.randint(0, 1),
"write_buffer_size": 4 * 1024 * 1024,
"subcompactions": lambda: random.randint(1, 4),
"random_kill_odd": 888887,
}
simple_default_params = {
"block_size": 16384,
"cache_size": 1048576,
"use_clock_cache": "false",
"column_families": 1,
"delpercent": 5,
"destroy_db_initially": 0,
"disable_wal": 0,
"allow_concurrent_memtable_write": lambda: random.randint(0, 1),
"iterpercent": 10,
"max_background_compactions": 1,
"max_bytes_for_level_base": 67108864,
"max_key": 100000000,
"max_write_buffer_number": 3,
"memtablerep": "skip_list",
"mmap_read": lambda: random.randint(0, 1),
"prefix_size": 0,
"prefixpercent": 0,
"progress_reports": 0,
"readpercent": 50,
"reopen": 20,
"sync": 0,
"target_file_size_base": 16777216,
"target_file_size_multiplier": 1,
"test_batches_snapshots": 0,
"threads": 32,
"verify_checksum": 1,
"write_buffer_size": 32 * 1024 * 1024,
"writepercent": 35,
"subcompactions": lambda: random.randint(1, 4),
}
blackbox_simple_default_params = {
"duration": 6000,
"interval": 120,
"open_files": -1,
"ops_per_thread": 100000000,
"set_options_one_in": 0,
"test_batches_snapshots": 0,
}
whitebox_simple_default_params = {
"duration": 10000,
"log2_keys_per_lock": 10,
"nooverwritepercent": 1,
"open_files": 500000,
"ops_per_thread": 200000,
"write_buffer_size": 32 * 1024 * 1024,
"subcompactions": lambda: random.randint(1, 4),
}
def finalize_and_sanitize(src_params):
dest_params = dict([(k, v() if callable(v) else v)
for (k, v) in src_params.items()])
if dest_params.get("allow_concurrent_memtable_write", 1) == 1:
dest_params["memtablerep"] = "skip_list"
return dest_params
def gen_cmd_params(args):
params = {}
if args.simple:
params.update(simple_default_params)
if args.test_type == 'blackbox':
params.update(blackbox_simple_default_params)
if args.test_type == 'whitebox':
params.update(whitebox_simple_default_params)
if not args.simple:
params.update(default_params)
if args.test_type == 'blackbox':
params.update(blackbox_default_params)
if args.test_type == 'whitebox':
params.update(whitebox_default_params)
for k, v in vars(args).items():
if v is not None:
params[k] = v
return params
def gen_cmd(params):
cmd = ['./db_stress'] + [
'--{0}={1}'.format(k, v)
for k, v in finalize_and_sanitize(params).items()
if k not in set(['test_type', 'simple', 'duration', 'interval',
'random_kill_odd'])
and v is not None]
return cmd
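# For example, a params dict of {'readpercent': 45, 'db': '/tmp/x', 'simple': True}
# becomes flags like '--readpercent=45' and '--db=/tmp/x'; 'simple' is dropped
# because it is a script-level option rather than a db_stress flag.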
# This script runs and kills db_stress multiple times. It checks consistency
# in case of unsafe crashes in RocksDB.
def blackbox_crash_main(args):
cmd_params = gen_cmd_params(args)
dbname = get_dbname('blackbox')
exit_time = time.time() + cmd_params['duration']
print("Running blackbox-crash-test with \n"
+ "interval_between_crash=" + str(cmd_params['interval']) + "\n"
+ "total-duration=" + str(cmd_params['duration']) + "\n"
+ "threads=" + str(cmd_params['threads']) + "\n"
+ "ops_per_thread=" + str(cmd_params['ops_per_thread']) + "\n"
+ "write_buffer_size=" + str(cmd_params['write_buffer_size']) + "\n"
+ "subcompactions=" + str(cmd_params['subcompactions']) + "\n")
while time.time() < exit_time:
run_had_errors = False
killtime = time.time() + cmd_params['interval']
cmd = gen_cmd(dict(cmd_params.items() + {'db': dbname}.items()))
child = subprocess.Popen(cmd, stderr=subprocess.PIPE)
print("Running db_stress with pid=%d: %s\n\n"
% (child.pid, ' '.join(cmd)))
stop_early = False
while time.time() < killtime:
if child.poll() is not None:
print("WARNING: db_stress ended before kill: exitcode=%d\n"
% child.returncode)
stop_early = True
break
time.sleep(1)
if not stop_early:
if child.poll() is not None:
print("WARNING: db_stress ended before kill: exitcode=%d\n"
% child.returncode)
else:
child.kill()
print("KILLED %d\n" % child.pid)
time.sleep(1) # time to stabilize after a kill
while True:
line = child.stderr.readline().strip()
if line != '' and not line.startswith('WARNING'):
run_had_errors = True
print('stderr has error message:')
print('***' + line + '***')
else:
break
if run_had_errors:
sys.exit(2)
time.sleep(1) # time to stabilize before the next run
# we need to clean up after ourselves -- only do this on test success
shutil.rmtree(dbname, True)
# This python script runs db_stress multiple times. Some runs with
# kill_random_test that causes rocksdb to crash at various points in code.
def whitebox_crash_main(args):
cmd_params = gen_cmd_params(args)
dbname = get_dbname('whitebox')
cur_time = time.time()
exit_time = cur_time + cmd_params['duration']
half_time = cur_time + cmd_params['duration'] / 2
print("Running whitebox-crash-test with \n"
+ "total-duration=" + str(cmd_params['duration']) + "\n"
+ "threads=" + str(cmd_params['threads']) + "\n"
+ "ops_per_thread=" + str(cmd_params['ops_per_thread']) + "\n"
+ "write_buffer_size=" + str(cmd_params['write_buffer_size']) + "\n"
+ "subcompactions=" + str(cmd_params['subcompactions']) + "\n")
total_check_mode = 4
check_mode = 0
kill_random_test = cmd_params['random_kill_odd']
kill_mode = 0
while time.time() < exit_time:
if check_mode == 0:
additional_opts = {
# use large ops per thread since we will kill it anyway
"ops_per_thread": 100 * cmd_params['ops_per_thread'],
}
# run with kill_random_test, with three modes.
# Mode 0 covers all kill points. Mode 1 covers fewer kill points but
# increases the chance of triggering them. Mode 2 covers even less
# frequent kill points and further increases that chance.
if kill_mode == 0:
additional_opts.update({
"kill_random_test": kill_random_test,
})
elif kill_mode == 1:
additional_opts.update({
"kill_random_test": (kill_random_test / 10 + 1),
"kill_prefix_blacklist": "WritableFileWriter::Append,"
+ "WritableFileWriter::WriteBuffered",
})
elif kill_mode == 2:
# TODO: May need to adjust random odds if kill_random_test
# is too small.
additional_opts.update({
"kill_random_test": (kill_random_test / 5000 + 1),
"kill_prefix_blacklist": "WritableFileWriter::Append,"
"WritableFileWriter::WriteBuffered,"
"PosixMmapFile::Allocate,WritableFileWriter::Flush",
})
# Run kill mode 0, 1 and 2 by turn.
kill_mode = (kill_mode + 1) % 3
elif check_mode == 1:
# normal run with universal compaction mode
additional_opts = {
"kill_random_test": None,
"ops_per_thread": cmd_params['ops_per_thread'],
"compaction_style": 1,
}
elif check_mode == 2:
# normal run with FIFO compaction mode
# ops_per_thread is divided by 5 because FIFO compaction
# style is quite a bit slower on reads with a lot of files
additional_opts = {
"kill_random_test": None,
"ops_per_thread": cmd_params['ops_per_thread'] / 5,
"compaction_style": 2,
}
else:
# normal run
additional_opts = {
"kill_random_test": None,
"ops_per_thread": cmd_params['ops_per_thread'],
}
cmd = gen_cmd(dict(cmd_params.items() + additional_opts.items()
+ {'db': dbname}.items()))
print "Running:" + ' '.join(cmd) + "\n"
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdoutdata, stderrdata = popen.communicate()
retncode = popen.returncode
msg = ("check_mode={0}, kill option={1}, exitcode={2}\n".format(
check_mode, additional_opts['kill_random_test'], retncode))
print msg
print stdoutdata
expected = False
if additional_opts['kill_random_test'] is None and (retncode == 0):
# we expect zero retncode if no kill option
expected = True
elif additional_opts['kill_random_test'] is not None and retncode < 0:
# we expect negative retncode if kill option was given
expected = True
if not expected:
print "TEST FAILED. See kill option and exit code above!!!\n"
sys.exit(1)
stdoutdata = stdoutdata.lower()
errorcount = (stdoutdata.count('error') -
stdoutdata.count('got errors 0 times'))
print "#times error occurred in output is " + str(errorcount) + "\n"
if (errorcount > 0):
print "TEST FAILED. Output has 'error'!!!\n"
sys.exit(2)
if (stdoutdata.find('fail') >= 0):
print "TEST FAILED. Output has 'fail'!!!\n"
sys.exit(2)
# First half of the duration, keep doing kill test. For the next half,
# try different modes.
if time.time() > half_time:
# we need to clean up after ourselves -- only do this on test
# success
shutil.rmtree(dbname, True)
check_mode = (check_mode + 1) % total_check_mode
time.sleep(1) # time to stabilize after a kill
def main():
parser = argparse.ArgumentParser(description="This script runs and kills \
db_stress multiple times")
parser.add_argument("test_type", choices=["blackbox", "whitebox"])
parser.add_argument("--simple", action="store_true")
all_params = dict(default_params.items()
+ blackbox_default_params.items()
+ whitebox_default_params.items()
+ simple_default_params.items()
+ blackbox_simple_default_params.items()
+ whitebox_simple_default_params.items())
for k, v in all_params.items():
parser.add_argument("--" + k, type=type(v() if callable(v) else v))
args = parser.parse_args()
if args.test_type == 'blackbox':
blackbox_crash_main(args)
if args.test_type == 'whitebox':
whitebox_crash_main(args)
if __name__ == '__main__':
main()
|
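# Typical invocations of this script (the script path is assumed; the
# positional argument selects the test type and --simple switches to the
# single-column-family parameter set):
#   python db_crashtest.py blackbox
#   python db_crashtest.py whitebox --simple --duration=3600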
|
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""API Module for working with EOS local user resources
The Users resource provides configuration of local user resources for
an EOS node.
Parameters:
username (string): The username parameter maps to the local username
defined in the running-config.
nopassword (boolean): Configures the username to use no password at login.
This parameter is mutually exclusive with secret
privilege (integer): Configures the user privilege level in EOS
role (string): Configures the users role in EOS
secret (string): Configures the users secret (password) to use at login.
This parameter is mutually exclusive with nopassword and is used in
conjunction with format.
format (string): Configures the format of the secret value. Accepted
values for format are "cleartext", "md5" and "sha512"
"""
import re
from pyeapi.api import EntityCollection
DEFAULT_ENCRYPTION = 'cleartext'
ENCRYPTION_MAP = {'cleartext': 0, 'md5': 5, 'sha512': 'sha512'}
def isprivilege(value):
"""Checks value for valid privilege level
Args:
value (str, int): Checks if value is a valid user privilege
Returns:
True if the value is valid, otherwise False
"""
try:
value = int(value)
return 0 <= value < 16
except ValueError:
return False
class Users(EntityCollection):
"""The Users class provides a configuration resource for local users.
The regex used here parses the running configuration to find username
entries. There is extra logic in the regular expression to store
the username as 'user' and then create a backreference to find a
following configuration line that might contain the users sshkey.
"""
def get(self, name):
"""Returns the local user configuration as a resource dict
Args:
name (str): The username to return from the nodes global running-
config.
Returns:
dict: A resource dict object
If the `name` does not exist, then None is returned
"""
return self.getall().get(name)
def getall(self):
"""Returns all local users configuration as a resource dict
Returns:
dict: A dict of usernames with a nested resource dict object
"""
if self.version_number >= '4.23':
self.users_re = re.compile(r'username (?P<user>[^\s]+) '
r'privilege (\d+)'
r'(?: role ([^\s]+))?'
r'(?: (nopassword))?'
r'(?: secret (0|5|7|sha512) (.+))?'
r'.*$\n(?:username (?P=user) '
r'ssh.key (.+)$)?', re.M)
else:
self.users_re = re.compile(r'username (?P<user>[^\s]+) '
r'privilege (\d+)'
r'(?: role ([^\s]+))?'
r'(?: (nopassword))?'
r'(?: secret (0|5|7|sha512) (.+))?'
r'.*$\n(?:username (?P=user) '
r'sshkey (.+)$)?', re.M)
users = self.users_re.findall(self.config)
resources = dict()
for user in users:
resources.update(self._parse_username(user))
return resources
def _parse_username(self, config):
"""Scans the config block and returns the username as a dict
Args:
config (str): The config block to parse
Returns:
dict: A resource dict that is intended to be merged into the
user resource
"""
(username, priv, role, nopass, fmt, secret, sshkey) = config
resource = dict()
resource['privilege'] = priv
resource['role'] = role
resource['nopassword'] = nopass == 'nopassword'
resource['format'] = fmt
resource['secret'] = secret
if self.version_number >= '4.23':
resource['ssh-key'] = sshkey
else:
resource['sshkey'] = sshkey
return {username: resource}
def create(self, name, nopassword=None, secret=None, encryption=None):
"""Creates a new user on the local system.
Creating users requires either a secret (password) or the nopassword
keyword to be specified.
Args:
name (str): The name of the user to create
nopassword (bool): Configures the user to be able to authenticate
without a password challenge
secret (str): The secret (password) to assign to this user
encryption (str): Specifies how the secret is encoded. Valid
values are "cleartext", "md5", "sha512". The default is
"cleartext"
Returns:
True if the operation was successful otherwise False
Raises:
TypeError: if the required arguments are not satisfied
"""
if secret is not None:
return self.create_with_secret(name, secret, encryption)
elif nopassword is True:
return self.create_with_nopassword(name)
else:
raise TypeError('either "nopassword" or "secret" must be '
'specified to create a user')
def create_with_secret(self, name, secret, encryption):
"""Creates a new user on the local node
Args:
name (str): The name of the user to create
secret (str): The secret (password) to assign to this user
encryption (str): Specifies how the secret is encoded. Valid
values are "cleartext", "md5", "sha512". The default is
"cleartext"
Returns:
True if the operation was successful otherwise False
"""
try:
encryption = encryption or DEFAULT_ENCRYPTION
enc = ENCRYPTION_MAP[encryption]
except KeyError:
raise TypeError('encryption must be one of "cleartext", "md5"'
' or "sha512"')
cmd = 'username %s secret %s %s' % (name, enc, secret)
return self.configure(cmd)
def create_with_nopassword(self, name):
"""Creates a new user on the local node
Args:
name (str): The name of the user to create
Returns:
True if the operation was successful otherwise False
"""
return self.configure('username %s nopassword' % name)
def delete(self, name):
"""Deletes the local username from the config
Args:
name (str): The name of the user to delete
Returns:
True if the operation was successful otherwise False
"""
if name == 'admin':
raise TypeError('the admin user cannot be deleted.')
return self.configure('no username %s' % name)
def default(self, name):
"""Configures the local username using the default keyword
Args:
name (str): The name of the user to configure
Returns:
True if the operation was successful otherwise False
"""
return self.configure('default username %s' % name)
def set_privilege(self, name, value=None):
"""Configures the user privilege value in EOS
Args:
name (str): The name of the user to create
value (int): The privilege value to assign to the user. Valid
values are in the range of 0 to 15
Returns:
True if the operation was successful otherwise False
Raises:
TypeError: if the value is not in the valid range
"""
cmd = 'username %s' % name
if value is not None:
if not isprivilege(value):
raise TypeError('privilege value must be between 0 and 15')
cmd += ' privilege %s' % value
else:
cmd += ' privilege 1'
return self.configure(cmd)
def set_role(self, name, value=None, default=False, disable=False):
"""Configures the user role vale in EOS
Args:
name (str): The name of the user to create
value (str): The value to configure for the user role
default (bool): Configure the user role using the EOS CLI
default command
disable (bool): Negate the user role using the EOS CLI no command
Returns:
True if the operation was successful otherwise False
"""
cmd = self.command_builder('username %s role' % name, value=value,
default=default, disable=disable)
return self.configure(cmd)
def set_sshkey(self, name, value=None, default=False, disable=False):
"""Configures the user sshkey
Args:
name (str): The name of the user to add the sshkey to
value (str): The value to configure for the sshkey.
default (bool): Configure the sshkey using the EOS CLI
default command
disable (bool): Negate the sshkey using the EOS CLI no command
Returns:
True if the operation was successful otherwise False
"""
if self.version_number >= '4.23':
cmd = self.command_builder('username %s ssh-key' % name,
value=value,
default=default, disable=disable)
else:
cmd = self.command_builder('username %s sshkey' % name,
value=value,
default=default, disable=disable)
return self.configure(cmd)
def instance(node):
"""Returns an instance of Users
This method will create and return an instance of the Users object passing
the value of API to the object. The instance method is required for the
resource to be autoloaded by the Node object
Args:
node (Node): The node argument passes an instance of Node to the
resource
"""
return Users(node)
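# Hedged usage sketch: ``node`` is assumed to be a connected pyeapi Node whose
# autoload mechanism obtains this resource through ``instance()``; the user
# name, secret, and key below are illustrative only.
#
#   users = instance(node)
#   users.create('ops', secret='s3cret', encryption='md5')
#   users.set_privilege('ops', 15)
#   users.set_sshkey('ops', value='ssh-rsa AAAA... ops@example.com')
#   users.delete('ops')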
|
|
# -*- coding: utf8 -*-
from beehive.compat import unicode
from beehive.formatter.ansi_escapes import escapes, up
from beehive.formatter.base import Formatter
from beehive.model_describe import escape_cell, escape_triple_quotes
from beehive.textutil import indent
import sys
# -----------------------------------------------------------------------------
# TERMINAL SUPPORT:
# -----------------------------------------------------------------------------
DEFAULT_WIDTH = 80
DEFAULT_HEIGHT = 24
def get_terminal_size():
if sys.platform.startswith('win'):
# Autodetecting the size of a Windows command window is left as an
# exercise for the reader. Prizes may be awarded for the best answer.
return (DEFAULT_WIDTH, DEFAULT_HEIGHT)
try:
import fcntl
import termios
import struct
zero_struct = struct.pack('HHHH', 0, 0, 0, 0)
result = fcntl.ioctl(0, termios.TIOCGWINSZ, zero_struct)
h, w, hp, wp = struct.unpack('HHHH', result)
return w or DEFAULT_WIDTH, h or DEFAULT_HEIGHT
except Exception:
return (DEFAULT_WIDTH, DEFAULT_HEIGHT)
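# Example (hedged): on a POSIX tty ``width, height = get_terminal_size()``
# returns the live window size via TIOCGWINSZ; on Windows, or whenever the
# ioctl fails, it simply falls back to (DEFAULT_WIDTH, DEFAULT_HEIGHT).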
# -----------------------------------------------------------------------------
# COLORING SUPPORT:
# -----------------------------------------------------------------------------
class MonochromeFormat(object):
def text(self, text):
assert isinstance(text, unicode)
return text
class ColorFormat(object):
def __init__(self, status):
self.status = status
def text(self, text):
assert isinstance(text, unicode)
return escapes[self.status] + text + escapes['reset']
# -----------------------------------------------------------------------------
# CLASS: PrettyFormatter
# -----------------------------------------------------------------------------
class PrettyFormatter(Formatter):
name = 'pretty'
description = 'Standard colourised pretty formatter'
def __init__(self, stream_opener, config):
super(PrettyFormatter, self).__init__(stream_opener, config)
# -- ENSURE: Output stream is open.
self.stream = self.open()
isatty = getattr(self.stream, "isatty", lambda: True)
stream_supports_colors = isatty()
self.monochrome = not config.color or not stream_supports_colors
self.show_source = config.show_source
self.show_timings = config.show_timings
self.show_multiline = config.show_multiline
self.formats = None
self.display_width = get_terminal_size()[0]
# -- UNUSED: self.tag_statement = None
self.steps = []
self._uri = None
self._match = None
self.statement = None
self.indentations = []
self.step_lines = 0
def reset(self):
# -- UNUSED: self.tag_statement = None
self.steps = []
self._uri = None
self._match = None
self.statement = None
self.indentations = []
self.step_lines = 0
def uri(self, uri):
self.reset()
self._uri = uri
def feature(self, feature):
#self.print_comments(feature.comments, '')
self.print_tags(feature.tags, '')
self.stream.write(u"%s: %s" % (feature.keyword, feature.name))
if self.show_source:
format = self.format('comments')
self.stream.write(format.text(u" # %s" % feature.location))
self.stream.write("\n")
self.print_description(feature.description, ' ', False)
self.stream.flush()
def background(self, background):
self.replay()
self.statement = background
def scenario(self, scenario):
self.replay()
self.statement = scenario
def scenario_outline(self, scenario_outline):
self.replay()
self.statement = scenario_outline
def replay(self):
self.print_statement()
self.print_steps()
self.stream.flush()
def examples(self, examples):
self.replay()
self.stream.write("\n")
self.print_comments(examples.comments, ' ')
self.print_tags(examples.tags, ' ')
self.stream.write(' %s: %s\n' % (examples.keyword, examples.name))
self.print_description(examples.description, ' ')
self.table(examples.rows)
self.stream.flush()
def step(self, step):
self.steps.append(step)
def match(self, match):
self._match = match
self.print_statement()
self.print_step('executing', self._match.arguments,
self._match.location, self.monochrome)
self.stream.flush()
def result(self, result):
if not self.monochrome:
lines = self.step_lines + 1
if self.show_multiline:
if result.table:
lines += len(result.table.rows) + 1
if result.text:
lines += len(result.text.splitlines()) + 2
self.stream.write(up(lines))
arguments = []
location = None
if self._match:
arguments = self._match.arguments
location = self._match.location
self.print_step(result.status, arguments, location, True)
if result.error_message:
self.stream.write(indent(result.error_message.strip(), u' '))
self.stream.write('\n\n')
self.stream.flush()
def arg_format(self, key):
return self.format(key + '_arg')
def format(self, key):
if self.monochrome:
if self.formats is None:
self.formats = MonochromeFormat()
return self.formats
if self.formats is None:
self.formats = {}
format = self.formats.get(key, None)
if format is not None:
return format
format = self.formats[key] = ColorFormat(key)
return format
def eof(self):
self.replay()
self.stream.write('\n')
self.stream.flush()
def table(self, table):
cell_lengths = []
all_rows = [table.headings] + table.rows
for row in all_rows:
lengths = [len(escape_cell(c)) for c in row]
cell_lengths.append(lengths)
max_lengths = []
for col in range(0, len(cell_lengths[0])):
max_lengths.append(max([c[col] for c in cell_lengths]))
for i, row in enumerate(all_rows):
#for comment in row.comments:
# self.stream.write(' %s\n' % comment.value)
self.stream.write(' |')
for j, (cell, max_length) in enumerate(zip(row, max_lengths)):
self.stream.write(' ')
self.stream.write(self.color(cell, None, j))
self.stream.write(' ' * (max_length - cell_lengths[i][j]))
self.stream.write(' |')
self.stream.write('\n')
self.stream.flush()
def doc_string(self, doc_string):
#self.stream.write(' """' + doc_string.content_type + '\n')
prefix = ' '
self.stream.write('%s"""\n' % prefix)
doc_string = escape_triple_quotes(indent(doc_string, prefix))
self.stream.write(doc_string)
self.stream.write('\n%s"""\n' % prefix)
self.stream.flush()
# def doc_string(self, doc_string):
# from beehive.model_describe import ModelDescriptor
# prefix = ' '
# text = ModelDescriptor.describe_docstring(doc_string, prefix)
# self.stream.write(text)
# self.stream.flush()
def exception(self, exception):
exception_text = unicode(exception)
self.stream.write(self.failed(exception_text) + '\n')
self.stream.flush()
def color(self, cell, statuses, color):
if statuses:
return escapes['color'] + escapes['reset']
else:
return escape_cell(cell)
def indented_text(self, text, proceed):
if not text:
return u''
if proceed:
indentation = self.indentations.pop(0)
else:
indentation = self.indentations[0]
indentation = u' ' * indentation
return u'%s # %s' % (indentation, text)
def calculate_location_indentations(self):
line_widths = []
for s in [self.statement] + self.steps:
string = s.keyword + ' ' + s.name
line_widths.append(len(string))
max_line_width = max(line_widths)
self.indentations = [max_line_width - width for width in line_widths]
def print_statement(self):
if self.statement is None:
return
self.calculate_location_indentations()
self.stream.write(u"\n")
#self.print_comments(self.statement.comments, ' ')
if hasattr(self.statement, 'tags'):
self.print_tags(self.statement.tags, u' ')
self.stream.write(u" %s: %s " % (self.statement.keyword,
self.statement.name))
location = self.indented_text(unicode(self.statement.location), True)
if self.show_source:
self.stream.write(self.format('comments').text(location))
self.stream.write("\n")
#self.print_description(self.statement.description, u' ')
self.statement = None
def print_steps(self):
while self.steps:
self.print_step('skipped', [], None, True)
def print_step(self, status, arguments, location, proceed):
if proceed:
step = self.steps.pop(0)
else:
step = self.steps[0]
text_format = self.format(status)
arg_format = self.arg_format(status)
#self.print_comments(step.comments, ' ')
self.stream.write(' ')
self.stream.write(text_format.text(step.keyword + ' '))
line_length = 5 + len(step.keyword)
step_name = unicode(step.name)
text_start = 0
for arg in arguments:
if arg.end <= text_start:
# -- SKIP-OVER: Optional and nested regexp args
# - Optional regexp args (unmatched: None).
# - Nested regexp args that are already processed.
continue
# -- VALID, MATCHED ARGUMENT:
assert arg.original is not None
text = step_name[text_start:arg.start]
self.stream.write(text_format.text(text))
line_length += len(text)
self.stream.write(arg_format.text(arg.original))
line_length += len(arg.original)
text_start = arg.end
if text_start != len(step_name):
text = step_name[text_start:]
self.stream.write(text_format.text(text))
line_length += (len(text))
if self.show_source:
location = unicode(location)
if self.show_timings and status in ('passed', 'failed'):
location += ' %0.3fs' % step.duration
location = self.indented_text(location, proceed)
self.stream.write(self.format('comments').text(location))
line_length += len(location)
elif self.show_timings and status in ('passed', 'failed'):
timing = '%0.3fs' % step.duration
timing = self.indented_text(timing, proceed)
self.stream.write(self.format('comments').text(timing))
line_length += len(timing)
self.stream.write("\n")
self.step_lines = int((line_length - 1) / self.display_width)
if self.show_multiline:
if step.text:
self.doc_string(step.text)
if step.table:
self.table(step.table)
def print_tags(self, tags, indentation):
if not tags:
return
line = ' '.join('@' + tag for tag in tags)
self.stream.write(indentation + line + '\n')
def print_comments(self, comments, indentation):
if not comments:
return
self.stream.write(indent([c.value for c in comments], indentation))
self.stream.write('\n')
def print_description(self, description, indentation, newline=True):
if not description:
return
self.stream.write(indent(description, indentation))
if newline:
self.stream.write('\n')
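# Hedged usage sketch (the runner, stream_opener, and model objects are
# assumed; method names follow the behave-style Formatter API this class
# implements). The formatter is selected by its ``name`` attribute, e.g.
# ``--format pretty`` on the command line, and then driven callback-style:
#
#   formatter = PrettyFormatter(stream_opener, config)
#   formatter.uri('features/login.feature')
#   formatter.feature(feature)        # prints the "Feature:" header
#   formatter.scenario(scenario)      # buffered until replay()
#   for step in scenario.steps:
#       formatter.step(step)          # queued; printed by match()/result()
#   formatter.eof()                   # replays anything still buffered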
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import sys
import time
import uuid
import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import network_utils
from ceilometer.openstack.common.rpc import amqp as rpc_amqp
from ceilometer.openstack.common.rpc import common as rpc_common
kombu_opts = [
cfg.StrOpt('kombu_ssl_version',
default='',
help='SSL version to use (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_keyfile',
default='',
help='SSL key file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_certfile',
default='',
help='SSL cert file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_ca_certs',
default='',
help=('SSL certification authority file '
'(valid only if SSL enabled)')),
cfg.StrOpt('rabbit_host',
default='localhost',
help='The RabbitMQ broker address where a single node is used'),
cfg.IntOpt('rabbit_port',
default=5672,
help='The RabbitMQ broker port where a single node is used'),
cfg.ListOpt('rabbit_hosts',
default=['$rabbit_host:$rabbit_port'],
help='RabbitMQ HA cluster host:port pairs'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
help='connect over SSL for RabbitMQ'),
cfg.StrOpt('rabbit_userid',
default='guest',
help='the RabbitMQ userid'),
cfg.StrOpt('rabbit_password',
default='guest',
help='the RabbitMQ password',
secret=True),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='the RabbitMQ virtual host'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='how frequently to retry connecting with RabbitMQ'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
help='how long to backoff for between retries when connecting '
'to RabbitMQ'),
cfg.IntOpt('rabbit_max_retries',
default=0,
help='maximum retries with trying to connect to RabbitMQ '
'(the default of 0 implies an infinite retry count)'),
cfg.BoolOpt('rabbit_durable_queues',
default=False,
help='use durable queues in RabbitMQ'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
help='use H/A queues in RabbitMQ (x-ha-policy: all). '
'You need to wipe RabbitMQ database when '
'changing this option.'),
]
cfg.CONF.register_opts(kombu_opts)
LOG = rpc_common.LOG
def _get_queue_arguments(conf):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
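# Illustration: with ``rabbit_ha_queues = True`` every queue declared below is
# created with ``queue_arguments={'x-ha-policy': 'all'}`` (mirrored to all
# cluster nodes); with the default ``False`` an empty dict is returned and
# plain, unmirrored queues are declared.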
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, channel, callback, tag, **kwargs):
"""Declare a queue on an amqp channel.
'channel' is the amqp channel to use
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
queue name, exchange name, and other kombu options are
passed in here as a dictionary.
"""
self.callback = callback
self.tag = str(tag)
self.kwargs = kwargs
self.queue = None
self.reconnect(channel)
def reconnect(self, channel):
"""Re-declare the queue after a rabbit reconnect"""
self.channel = channel
self.kwargs['channel'] = channel
self.queue = kombu.entity.Queue(**self.kwargs)
self.queue.declare()
def consume(self, *args, **kwargs):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using the
Connection.iterconsume() iterator will process the messages,
calling the appropriate callback.
If a callback is specified in kwargs, use that. Otherwise,
use the callback passed during __init__()
If kwargs['nowait'] is True, the call does not wait for the broker
to confirm the consumer before returning.
Messages will automatically be acked if the callback doesn't
raise an exception
"""
options = {'consumer_tag': self.tag}
options['nowait'] = kwargs.get('nowait', False)
callback = kwargs.get('callback', self.callback)
if not callback:
raise ValueError("No callback defined")
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
try:
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
message.ack()
self.queue.consume(*args, callback=_callback, **options)
def cancel(self):
"""Cancel the consuming from the queue, if it has started"""
try:
self.queue.cancel(self.tag)
except KeyError, e:
# NOTE(comstud): Kludge to get around a amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
self.queue = None
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'"""
def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
"""Init a 'direct' queue.
'channel' is the amqp channel to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(DirectConsumer, self).__init__(channel,
callback,
tag,
name=msg_id,
exchange=exchange,
routing_key=msg_id,
**options)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'"""
def __init__(self, conf, channel, topic, callback, tag, name=None,
exchange_name=None, **kwargs):
"""Init a 'topic' queue.
:param channel: the amqp channel to use
:param topic: the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param tag: a unique ID for the consumer on the channel
:param name: optional queue name, defaults to topic
:paramtype name: str
Other kombu options may be passed as keyword arguments
"""
# Default options
options = {'durable': conf.rabbit_durable_queues,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': False,
'exclusive': False}
options.update(kwargs)
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
exchange = kombu.entity.Exchange(name=exchange_name,
type='topic',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(TopicConsumer, self).__init__(channel,
callback,
tag,
name=name or topic,
exchange=exchange,
routing_key=topic,
**options)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'"""
def __init__(self, conf, channel, topic, callback, tag, **kwargs):
"""Init a 'fanout' queue.
'channel' is the amqp channel to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
unique = uuid.uuid4().hex
exchange_name = '%s_fanout' % topic
queue_name = '%s_fanout_%s' % (topic, unique)
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(FanoutConsumer, self).__init__(channel, callback, tag,
name=queue_name,
exchange=exchange,
routing_key=topic,
**options)
class Publisher(object):
"""Base Publisher class"""
def __init__(self, channel, exchange_name, routing_key, **kwargs):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.exchange_name = exchange_name
self.routing_key = routing_key
self.kwargs = kwargs
self.reconnect(channel)
def reconnect(self, channel):
"""Re-establish the Producer after a rabbit reconnection"""
self.exchange = kombu.entity.Exchange(name=self.exchange_name,
**self.kwargs)
self.producer = kombu.messaging.Producer(exchange=self.exchange,
channel=channel,
routing_key=self.routing_key)
def send(self, msg, timeout=None):
"""Send a message"""
if timeout:
#
# AMQP TTL is in milliseconds when set in the header.
#
self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
else:
self.producer.publish(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'"""
def __init__(self, conf, channel, msg_id, **kwargs):
"""init a 'direct' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
type='direct', **options)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'"""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'topic' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': conf.rabbit_durable_queues,
'auto_delete': False,
'exclusive': False}
options.update(kwargs)
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(channel,
exchange_name,
topic,
type='topic',
**options)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'"""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'fanout' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
None, type='fanout', **options)
class NotifyPublisher(TopicPublisher):
"""Publisher class for 'notify'"""
def __init__(self, conf, channel, topic, **kwargs):
self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
self.queue_arguments = _get_queue_arguments(conf)
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
def reconnect(self, channel):
super(NotifyPublisher, self).reconnect(channel)
# NOTE(jerdfelt): Normally the consumer would create the queue, but
# we do this to ensure that messages don't get dropped if the
# consumer is started after we do
queue = kombu.entity.Queue(channel=channel,
exchange=self.exchange,
durable=self.durable,
name=self.routing_key,
routing_key=self.routing_key,
queue_arguments=self.queue_arguments)
queue.declare()
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
self.consumers = []
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
self.max_retries = self.conf.rabbit_max_retries
# Try forever?
if self.max_retries <= 0:
self.max_retries = None
self.interval_start = self.conf.rabbit_retry_interval
self.interval_stepping = self.conf.rabbit_retry_backoff
# max retry-interval = 30 seconds
self.interval_max = 30
self.memory_transport = False
if server_params is None:
server_params = {}
# Keys to translate from server_params to kombu params
server_params_to_kombu_params = {'username': 'userid'}
ssl_params = self._fetch_ssl_params()
params_list = []
for adr in self.conf.rabbit_hosts:
hostname, port = network_utils.parse_host_port(
adr, default_port=self.conf.rabbit_port)
params = {
'hostname': hostname,
'port': port,
'userid': self.conf.rabbit_userid,
'password': self.conf.rabbit_password,
'virtual_host': self.conf.rabbit_virtual_host,
}
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
if self.conf.fake_rabbit:
params['transport'] = 'memory'
if self.conf.rabbit_use_ssl:
params['ssl'] = ssl_params
params_list.append(params)
self.params_list = params_list
self.memory_transport = self.conf.fake_rabbit
self.connection = None
self.reconnect()
def _fetch_ssl_params(self):
"""Handles fetching what ssl params
should be used for the connection (if any)"""
ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.conf.kombu_ssl_version:
ssl_params['ssl_version'] = self.conf.kombu_ssl_version
if self.conf.kombu_ssl_keyfile:
ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
if self.conf.kombu_ssl_certfile:
ssl_params['certfile'] = self.conf.kombu_ssl_certfile
if self.conf.kombu_ssl_ca_certs:
ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
# We might want to allow variations in the
# future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
if not ssl_params:
# Just have the default behavior
return True
else:
# Return the extended behavior
return ssl_params
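# Illustrative result (hedged; the path is an example only): with just
# ``kombu_ssl_ca_certs`` configured this returns
# ``{'ca_certs': '/etc/ssl/certs/ca.pem', 'cert_reqs': ssl.CERT_REQUIRED}``,
# which kombu forwards to ``ssl.wrap_socket``; with no kombu_ssl_* options set
# it returns ``True`` so the transport falls back to its default SSL handling.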
def _connect(self, params):
"""Connect to rabbit. Re-establish any queues that may have
been declared before if we are reconnecting. Exceptions should
be handled by the caller.
"""
if self.connection:
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
self.connection.release()
except self.connection_errors:
pass
# Setting this in case the next statement fails, though
# it shouldn't be doing any network operations, yet.
self.connection = None
self.connection = kombu.connection.BrokerConnection(**params)
self.connection_errors = self.connection.connection_errors
if self.memory_transport:
# Kludge to speed up tests.
self.connection.transport.polling_interval = 0.0
self.consumer_num = itertools.count(1)
self.connection.connect()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
params)
def reconnect(self):
"""Handles reconnecting and re-establishing queues.
Will retry up to self.max_retries number of times.
self.max_retries = 0 means to retry forever.
Sleep between tries, starting at self.interval_start
seconds, backing off self.interval_stepping number of seconds
each attempt.
"""
attempt = 0
while True:
params = self.params_list[attempt % len(self.params_list)]
attempt += 1
try:
self._connect(params)
return
except (IOError, self.connection_errors) as e:
pass
except Exception, e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
log_info = {}
log_info['err_str'] = str(e)
log_info['max_retries'] = self.max_retries
log_info.update(params)
if self.max_retries and attempt == self.max_retries:
LOG.error(_('Unable to connect to AMQP server on '
'%(hostname)s:%(port)d after %(max_retries)d '
'tries: %(err_str)s') % log_info)
# NOTE(comstud): Copied from original code. There's
# really no better recourse because if this was a queue we
# need to consume on, we have no way to consume anymore.
sys.exit(1)
if attempt == 1:
sleep_time = self.interval_start or 1
elif attempt > 1:
sleep_time += self.interval_stepping
if self.interval_max:
sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time)
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (self.connection_errors, socket.timeout, IOError), e:
if error_callback:
error_callback(e)
except Exception, e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
if error_callback:
error_callback(e)
self.reconnect()
def get_channel(self):
"""Convenience call for bin/clear_rabbit_queues"""
return self.channel
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.channel.close()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
self.consumers = []
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.channel, topic, callback,
self.consumer_num.next())
self.consumers.append(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers"""
info = {'do_consume': True}
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
info['do_consume'] = True
def _consume():
if info['do_consume']:
queues_head = self.consumers[:-1]
queues_tail = self.consumers[-1]
for queue in queues_head:
queue.consume(nowait=True)
queues_tail.consume(nowait=False)
info['do_consume'] = False
return self.connection.drain_events(timeout=timeout)
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread"""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
"""Send to a publisher based on the publisher class"""
def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publish():
publisher = cls(self.conf, self.channel, topic, **kwargs)
publisher.send(msg, timeout)
self.ensure(_error_callback, _publish)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer"""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message"""
self.publisher_send(TopicPublisher, topic, msg, timeout)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic"""
self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
def consume(self, limit=None):
"""Consume from all queues/consumers"""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread"""
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
self.declare_fanout_consumer(topic, proxy_cb)
else:
self.declare_topic_consumer(topic, proxy_cb)
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)
self.declare_topic_consumer(
queue_name=pool_name,
topic=topic,
exchange_name=exchange_name,
callback=callback_wrapper,
)
def create_connection(conf, new=True):
"""Create a connection"""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
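# Hedged usage sketch of this driver's module-level API (normally invoked
# through the generic rpc facade rather than directly; ``context``,
# ``my_callback``, and the topic/message below are illustrative):
#
#   conn = create_connection(cfg.CONF)
#   conn.declare_topic_consumer('compute', my_callback)
#   conn.consume_in_thread()
#   cast(cfg.CONF, context, 'compute', {'method': 'ping', 'args': {}})
#   conn.close()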
|
|
# Created by Pearu Peterson, September 2002
from __future__ import division, print_function, absolute_import
__usage__ = """
Build fftpack:
python setup_fftpack.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.fftpack.test()'
Run tests if fftpack is not installed:
python tests/test_basic.py
"""
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_less)
import pytest
from pytest import raises as assert_raises
from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2
from scipy.fftpack import _fftpack as fftpack
from scipy.fftpack.basic import _is_safe_size
from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
swapaxes, double, cdouble)
import numpy as np
import numpy.fft
from numpy.random import rand
# "large" composite numbers supported by FFTPACK
LARGE_COMPOSITE_SIZES = [
2**13,
2**5 * 3**5,
2**3 * 3**3 * 5**2,
]
SMALL_COMPOSITE_SIZES = [
2,
2*3*5,
2*2*3*3,
]
# prime
LARGE_PRIME_SIZES = [
2011
]
SMALL_PRIME_SIZES = [
29
]
def _assert_close_in_norm(x, y, rtol, size, rdt):
# helper function for testing
err_msg = "size: %s rdt: %s" % (size, rdt)
assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
def random(size):
return rand(*size)
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
return data
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)
return y
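# direct_dft is the textbook O(n**2) reference, y[k] = sum_m x[m]*exp(-2j*pi*k*m/n),
# against which fft() is checked below; direct_idft (next) is the matching
# inverse, identical except for the sign of the exponent and the 1/n factor.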
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)/n
return y
def direct_dftn(x):
x = asarray(x)
for axis in range(len(x.shape)):
x = fft(x, axis=axis)
return x
def direct_idftn(x):
x = asarray(x)
for axis in range(len(x.shape)):
x = ifft(x, axis=axis)
return x
def direct_rdft(x):
x = asarray(x)
n = len(x)
w = -arange(n)*(2j*pi/n)
r = zeros(n, dtype=double)
for i in range(n//2+1):
y = dot(exp(i*w), x)
if i:
r[2*i-1] = y.real
if 2*i < n:
r[2*i] = y.imag
else:
r[0] = y.real
return r
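# Packing illustration (FFTPACK's real-transform convention, which both
# scipy.fftpack.rfft and direct_rdft above follow): for real input of length n
# the output is
#   [y(0).real, y(1).real, y(1).imag, ..., y(n/2).real]              (n even)
#   [y(0).real, y(1).real, y(1).imag, ..., y((n-1)/2).imag]          (n odd)
# direct_irdft below rebuilds the full Hermitian-symmetric spectrum from this
# packed form and then applies direct_idft.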
def direct_irdft(x):
x = asarray(x)
n = len(x)
x1 = zeros(n, dtype=cdouble)
for i in range(n//2+1):
if i:
if 2*i < n:
x1[i] = x[2*i-1] + 1j*x[2*i]
x1[n-i] = x[2*i-1] - 1j*x[2*i]
else:
x1[i] = x[2*i-1]
else:
x1[0] = x[0]
return direct_idft(x1).real
class _TestFFTBase(object):
def setup_method(self):
self.cdt = None
self.rdt = None
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
y = fft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_dft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
assert_array_almost_equal(fft(x),direct_dft(x))
def test_n_argument_real(self):
x1 = np.array([1,2,3,4], dtype=self.rdt)
x2 = np.array([1,2,3,4], dtype=self.rdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def _test_n_argument_complex(self):
x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = list(range(n))
y = fftpack.zfft(x)
y2 = numpy.fft.fft(x)
assert_array_almost_equal(y,y2)
y = fftpack.zrfft(x)
assert_array_almost_equal(y,y2)
def test_invalid_sizes(self):
assert_raises(ValueError, fft, [])
assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
def test__is_safe_size(self):
vals = [(0, True), (1, True), (2, True), (3, True), (4, True), (5, True), (6, True), (7, False),
(15, True), (16, True), (17, False), (18, True), (21, False), (25, True), (50, True),
(120, True), (210, False)]
for n, is_safe in vals:
assert_equal(_is_safe_size(n), is_safe)
class TestDoubleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestSingleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
@pytest.mark.xfail(run=False, reason="single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved")
def test_notice(self):
pass
class TestFloat16FFT(object):
def test_1_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft(x1, n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (4, ))
assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
def test_n_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
x2 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft([x1, x2], n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (2, 4))
assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
class _TestIFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
y = ifft(x)
y1 = direct_idft(x)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_definition_real(self):
x = np.array([1,2,3,4,1,2,3,4], self.rdt)
y = ifft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_idft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4,5], dtype=self.rdt)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = list(range(n))
y = fftpack.zfft(x,direction=-1)
y2 = numpy.fft.ifft(x)
assert_array_almost_equal(y,y2)
y = fftpack.zrfft(x,direction=-1)
assert_array_almost_equal(y,y2)
def test_random_complex(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.cdt)
x = random([size]).astype(self.cdt) + 1j*x
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
if self.rdt == np.float32:
rtol = 1e-5
elif self.rdt == np.float64:
rtol = 1e-10
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
x = (x + 1j*np.random.rand(size)).astype(self.cdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, ifft, [])
assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
class TestDoubleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestSingleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class _TestRFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
x = np.array(t, dtype=self.rdt)
y = rfft(x)
y1 = direct_rdft(x)
assert_array_almost_equal(y,y1)
assert_equal(y.dtype, self.rdt)
def test_djbfft(self):
from numpy.fft import fft as numpy_fft
for i in range(2,14):
n = 2**i
x = list(range(n))
y2 = numpy_fft(x)
y1 = zeros((n,),dtype=double)
y1[0] = y2[0].real
y1[-1] = y2[n//2].real
for k in range(1, n//2):
y1[2*k-1] = y2[k].real
y1[2*k] = y2[k].imag
y = fftpack.drfft(x)
assert_array_almost_equal(y,y1)
def test_invalid_sizes(self):
assert_raises(ValueError, rfft, [])
assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
# See gh-5790
class MockSeries(object):
def __init__(self, data):
self.data = np.asarray(data)
def __getattr__(self, item):
try:
return getattr(self.data, item)
except AttributeError:
raise AttributeError(("'MockSeries' object "
"has no attribute '{attr}'".
format(attr=item)))
def test_non_ndarray_with_dtype(self):
x = np.array([1., 2., 3., 4., 5.])
xs = _TestRFFTBase.MockSeries(x)
expected = [1, 2, 3, 4, 5]
out = rfft(xs)
# Data should not have been overwritten
assert_equal(x, expected)
assert_equal(xs.data, expected)
class TestRFFTDouble(_TestRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestRFFTSingle(_TestRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class _TestIRFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x1 = [1,2,3,4,1,2,3,4]
x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
x2 = [1,2,3,4,1,2,3,4,5]
x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
def _test(x, xr):
y = irfft(np.array(x, dtype=self.rdt))
y1 = direct_irdft(x)
assert_equal(y.dtype, self.rdt)
assert_array_almost_equal(y,y1, decimal=self.ndec)
assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
_test(x1, x1_1)
_test(x2, x2_1)
def test_djbfft(self):
from numpy.fft import ifft as numpy_ifft
for i in range(2,14):
n = 2**i
x = list(range(n))
x1 = zeros((n,),dtype=cdouble)
x1[0] = x[0]
for k in range(1, n//2):
x1[k] = x[2*k-1]+1j*x[2*k]
x1[n-k] = x[2*k-1]-1j*x[2*k]
x1[n//2] = x[-1]
y1 = numpy_ifft(x1)
y = fftpack.drfft(x,direction=-1)
assert_array_almost_equal(y,y1)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = irfft(rfft(x))
y2 = rfft(irfft(x))
assert_equal(y1.dtype, self.rdt)
assert_equal(y2.dtype, self.rdt)
assert_array_almost_equal(y1, x, decimal=self.ndec,
err_msg="size=%d" % size)
assert_array_almost_equal(y2, x, decimal=self.ndec,
err_msg="size=%d" % size)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
if self.rdt == np.float32:
rtol = 1e-5
elif self.rdt == np.float64:
rtol = 1e-10
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = irfft(rfft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = rfft(irfft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, irfft, [])
assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
# self.ndec is bogus; we should have an assert_array_approx_equal for number of
# significant digits
class TestIRFFTDouble(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
self.ndec = 14
class TestIRFFTSingle(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
self.ndec = 5
class Testfft2(object):
def setup_method(self):
np.random.seed(1234)
def test_regression_244(self):
"""FFT returns wrong result with axes parameter."""
# fftn (and hence fft2) used to break when both axes and shape were
# used
x = numpy.ones((4, 4, 2))
y = fft2(x, shape=(8, 8), axes=(-3, -2))
y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
assert_array_almost_equal(y, y_r)
def test_invalid_sizes(self):
assert_raises(ValueError, fft2, [[]])
assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
class TestFftnSingle(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float32))
assert_(y.dtype == np.complex64,
msg="double precision output with single precision")
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_size_accuracy_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_size_accuracy_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
def test_definition_float16(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float16))
assert_equal(y.dtype, np.complex64)
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_float16_input_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 5e5)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_float16_input_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2e6)
class TestFftn(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(x)
assert_array_almost_equal(y, direct_dftn(x))
x = random((20, 26))
assert_array_almost_equal(fftn(x), direct_dftn(x))
x = random((5, 4, 3, 20))
assert_array_almost_equal(fftn(x), direct_dftn(x))
def test_axes_argument(self):
# plane == ji_plane, x== kji_space
plane1 = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
plane2 = [[10, 11, 12],
[13, 14, 15],
[16, 17, 18]]
plane3 = [[19, 20, 21],
[22, 23, 24],
[25, 26, 27]]
ki_plane1 = [[1, 2, 3],
[10, 11, 12],
[19, 20, 21]]
ki_plane2 = [[4, 5, 6],
[13, 14, 15],
[22, 23, 24]]
ki_plane3 = [[7, 8, 9],
[16, 17, 18],
[25, 26, 27]]
jk_plane1 = [[1, 10, 19],
[4, 13, 22],
[7, 16, 25]]
jk_plane2 = [[2, 11, 20],
[5, 14, 23],
[8, 17, 26]]
jk_plane3 = [[3, 12, 21],
[6, 15, 24],
[9, 18, 27]]
kj_plane1 = [[1, 4, 7],
[10, 13, 16], [19, 22, 25]]
kj_plane2 = [[2, 5, 8],
[11, 14, 17], [20, 23, 26]]
kj_plane3 = [[3, 6, 9],
[12, 15, 18], [21, 24, 27]]
ij_plane1 = [[1, 4, 7],
[2, 5, 8],
[3, 6, 9]]
ij_plane2 = [[10, 13, 16],
[11, 14, 17],
[12, 15, 18]]
ij_plane3 = [[19, 22, 25],
[20, 23, 26],
[21, 24, 27]]
ik_plane1 = [[1, 10, 19],
[2, 11, 20],
[3, 12, 21]]
ik_plane2 = [[4, 13, 22],
[5, 14, 23],
[6, 15, 24]]
ik_plane3 = [[7, 16, 25],
[8, 17, 26],
[9, 18, 27]]
ijk_space = [jk_plane1, jk_plane2, jk_plane3]
ikj_space = [kj_plane1, kj_plane2, kj_plane3]
jik_space = [ik_plane1, ik_plane2, ik_plane3]
jki_space = [ki_plane1, ki_plane2, ki_plane3]
kij_space = [ij_plane1, ij_plane2, ij_plane3]
x = array([plane1, plane2, plane3])
assert_array_almost_equal(fftn(x),
fftn(x, axes=(-3, -2, -1))) # kji_space
assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
y = fftn(x, axes=(2, 1, 0)) # ijk_space
assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
y = fftn(x, axes=(2, 0, 1)) # ikj_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
fftn(ikj_space))
y = fftn(x, axes=(1, 2, 0)) # jik_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
fftn(jik_space))
y = fftn(x, axes=(1, 0, 2)) # jki_space
assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
y = fftn(x, axes=(0, 2, 1)) # kij_space
assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
y = fftn(x, axes=(-2, -1)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(1, 2)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(-3, -2)) # kj_plane
assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
y = fftn(x, axes=(-3, -1)) # ki_plane
assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
y = fftn(x, axes=(-1, -2)) # ij_plane
assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
y = fftn(x, axes=(-1, -3)) # ik_plane
assert_array_almost_equal(fftn(ik_plane1),
swapaxes(y[:, 0, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane2),
swapaxes(y[:, 1, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane3),
swapaxes(y[:, 2, :], -1, -2))
y = fftn(x, axes=(-2, -3)) # jk_plane
assert_array_almost_equal(fftn(jk_plane1),
swapaxes(y[:, :, 0], -1, -2))
assert_array_almost_equal(fftn(jk_plane2),
swapaxes(y[:, :, 1], -1, -2))
assert_array_almost_equal(fftn(jk_plane3),
swapaxes(y[:, :, 2], -1, -2))
y = fftn(x, axes=(-1,)) # i_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
y = fftn(x, axes=(-2,)) # j_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
y = fftn(x, axes=(0,)) # k_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
y = fftn(x, axes=()) # point
assert_array_almost_equal(y, x)
def test_shape_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6]]
large_x1 = [[1, 2, 3, 0],
[4, 5, 6, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
y = fftn(small_x, shape=(4, 4))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, shape=(3, 4))
assert_array_almost_equal(y, fftn(large_x1[:-1]))
def test_shape_axes_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
large_x1 = array([[1, 2, 3, 0],
[4, 5, 6, 0],
[7, 8, 9, 0],
[0, 0, 0, 0]])
y = fftn(small_x, shape=(4, 4), axes=(-2, -1))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, shape=(4, 4), axes=(-1, -2))
assert_array_almost_equal(y, swapaxes(
fftn(swapaxes(large_x1, -1, -2)), -1, -2))
def test_shape_axes_argument2(self):
# Change shape of the last axis
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-1,), shape=(8,))
assert_array_almost_equal(y, fft(x, axis=-1, n=8))
# Change shape of an arbitrary axis which is not the last one
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-2,), shape=(8,))
assert_array_almost_equal(y, fft(x, axis=-2, n=8))
# Change shape of axes: cf #244, where shape and axes were mixed up
x = numpy.random.random((4, 4, 2))
y = fftn(x, axes=(-3, -2), shape=(8, 8))
assert_array_almost_equal(y,
numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
def test_shape_argument_more(self):
x = zeros((4, 4, 2))
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fftn(x, shape=(8, 8, 2, 1))
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1 0\]\) specified"):
fftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[ 4 -3\]\) specified"):
fftn([[1, 1], [2, 2]], (4, -3))
class TestIfftn(object):
dtype = None
cdtype = None
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize('dtype,cdtype,maxnlp',
[(np.float64, np.complex128, 2000),
(np.float32, np.complex64, 3500)])
def test_definition(self, dtype, cdtype, maxnlp):
x = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype)
y = ifftn(x)
assert_equal(y.dtype, cdtype)
assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
x = random((20, 26))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
x = random((5, 4, 3, 20))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
@pytest.mark.parametrize('maxnlp', [2000, 3500])
@pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
def test_random_complex(self, maxnlp, size):
x = random([size, size]) + 1j*random([size, size])
assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1 0\]\) specified"):
ifftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[ 4 -3\]\) specified"):
ifftn([[1, 1], [2, 2]], (4, -3))
class TestLongDoubleFailure(object):
def setup_method(self):
np.random.seed(1234)
def test_complex(self):
if np.dtype(np.longcomplex).itemsize == np.dtype(complex).itemsize:
# longdouble == double; so fft is supported
return
x = np.random.randn(10).astype(np.longdouble) + \
1j * np.random.randn(10).astype(np.longdouble)
for f in [fft, ifft]:
try:
f(x)
raise AssertionError("Type {0} not supported but does not fail" %
np.longcomplex)
except ValueError:
pass
def test_real(self):
if np.dtype(np.longdouble).itemsize == np.dtype(np.double).itemsize:
# longdouble == double; so fft is supported
return
x = np.random.randn(10).astype(np.longcomplex)
for f in [fft, ifft]:
try:
f(x)
raise AssertionError("Type %r not supported but does not fail" %
np.longcomplex)
except ValueError:
pass
class FakeArray(object):
def __init__(self, data):
self._data = data
self.__array_interface__ = data.__array_interface__
class FakeArray2(object):
def __init__(self, data):
self._data = data
def __array__(self):
return self._data
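# FakeArray exposes the underlying buffer only via __array_interface__, while
# FakeArray2 exposes it only via __array__(); TestOverwrite._check() below runs each
# routine through both wrappers (plus the identity) so overwrite behaviour is
# verified no matter how the array-like input reaches the FFT code.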
class TestOverwrite(object):
"""Check input overwrite behavior of the FFT functions."""
real_dtypes = [np.float32, np.float64]
dtypes = real_dtypes + [np.complex64, np.complex128]
fftsizes = [8, 16, 32]
def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
x2 = x.copy()
for fake in [lambda x: x, FakeArray, FakeArray2]:
routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
if not should_overwrite:
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
fftsize, overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
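# An in-place overwrite is only expected when it was requested, the dtype is one the
# routine may overwrite, the transform does not exceed the axis length, and, for
# multi-dimensional input, the transform runs along the last axis with a size exactly
# equal to that axis length.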
should_overwrite = (overwrite_x
and dtype in overwritable_dtypes
and fftsize <= shape[axis]
and (len(shape) == 1 or
(axis % len(shape) == len(shape)-1
and fftsize == shape[axis])))
self._check(data, routine, fftsize, axis,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = (np.complex128, np.complex64)
self._check_1d(fft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(ifft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
@pytest.mark.parametrize('dtype', real_dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = self.real_dtypes
self._check_1d(irfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(rfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
def fftshape_iter(shp):
if len(shp) <= 0:
yield ()
else:
for j in (shp[0]//2, shp[0], shp[0]*2):
for rest in fftshape_iter(shp[1:]):
yield (j,) + rest
if axes is None:
part_shape = shape
else:
part_shape = tuple(np.take(shape, axes))
for fftshape in fftshape_iter(part_shape):
should_overwrite = (overwrite_x
and data.ndim == 1
and np.all([x < y for x, y in zip(fftshape,
part_shape)])
and dtype in overwritable_dtypes)
self._check(data, routine, fftshape, axes,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite)
if data.ndim > 1:
# check fortran order: it never overwrites
self._check(data.T, routine, fftshape, axes,
overwrite_x=overwrite_x,
should_overwrite=False)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), None),
((16,), (0,)),
((16, 2), (0,)),
((2, 16), (1,)),
((8, 16), None),
((8, 16), (0, 1)),
((8, 16, 2), (0, 1)),
((8, 16, 2), (1, 2)),
((8, 16, 2), (0,)),
((8, 16, 2), (1,)),
((8, 16, 2), (2,)),
((8, 16, 2), None),
((8, 16, 2), (0, 1, 2))])
def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
overwritable = (np.complex128, np.complex64)
self._check_nd_one(fftn, dtype, shape, axes, overwritable,
overwrite_x)
self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
overwrite_x)
|
|
from unittest import TestCase
from numpy.testing import assert_equal, assert_array_equal
import numpy as np
from srsly import msgpack
class ThirdParty(object):
def __init__(self, foo=b"bar"):
self.foo = foo
def __eq__(self, other):
return isinstance(other, ThirdParty) and self.foo == other.foo
class test_numpy_msgpack(TestCase):
def encode_decode(self, x, use_bin_type=False, raw=True):
x_enc = msgpack.packb(x, use_bin_type=use_bin_type)
return msgpack.unpackb(x_enc, raw=raw)
def encode_thirdparty(self, obj):
return dict(__thirdparty__=True, foo=obj.foo)
def decode_thirdparty(self, obj):
if b"__thirdparty__" in obj:
return ThirdParty(foo=obj[b"foo"])
return obj
def encode_decode_thirdparty(self, x, use_bin_type=False, raw=True):
x_enc = msgpack.packb(
x, default=self.encode_thirdparty, use_bin_type=use_bin_type
)
return msgpack.unpackb(x_enc, raw=raw, object_hook=self.decode_thirdparty)
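# Hedged sketch of the hook pattern exercised below (the value is illustrative):
#
#     packed = msgpack.packb(ThirdParty(foo=b"bar"), default=self.encode_thirdparty)
#     restored = msgpack.unpackb(packed, raw=True,
#                                object_hook=self.decode_thirdparty)
#     assert restored == ThirdParty(foo=b"bar")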
def test_bin(self):
# Since bytes == str in Python 2.7, the following
# should pass on both 2.7 and 3.*
assert_equal(type(self.encode_decode(b"foo")), bytes)
def test_str(self):
assert_equal(type(self.encode_decode("foo")), bytes)
def test_numpy_scalar_bool(self):
x = np.bool_(True)
x_rec = self.encode_decode(x)
assert_equal(x, x_rec)
assert_equal(type(x), type(x_rec))
x = np.bool_(False)
x_rec = self.encode_decode(x)
assert_equal(x, x_rec)
assert_equal(type(x), type(x_rec))
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
assert_equal(x, x_rec)
assert_equal(type(x), type(x_rec))
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
assert_equal(x, x_rec)
assert_equal(type(x), type(x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
assert_equal(x, x_rec)
assert_equal(type(x), type(x_rec))
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
assert_equal(x, x_rec)
assert_equal(type(x), type(x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_array_equal([type(e) for e in x], [type(e) for e in x_rec])
def test_list_numpy_float_complex(self):
x = [np.float32(np.random.rand()) for i in range(5)] + [
np.complex128(np.random.rand() + 1j * np.random.rand()) for i in range(5)
]
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_array_equal([type(e) for e in x], [type(e) for e in x_rec])
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_array_equal([type(e) for e in x], [type(e) for e in x_rec])
def test_list_float_complex(self):
x = [(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_array_equal([type(e) for e in x], [type(e) for e in x_rec])
def test_list_str(self):
x = [b"x" * i for i in range(5)]
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_array_equal([type(e) for e in x_rec], [bytes] * 5)
def test_dict_float(self):
x = {b"foo": 1.0, b"bar": 2.0}
x_rec = self.encode_decode(x)
assert_array_equal(sorted(x.values()), sorted(x_rec.values()))
assert_array_equal(
[type(e) for e in sorted(x.values())],
[type(e) for e in sorted(x_rec.values())],
)
assert_array_equal(sorted(x.keys()), sorted(x_rec.keys()))
assert_array_equal(
[type(e) for e in sorted(x.keys())], [type(e) for e in sorted(x_rec.keys())]
)
def test_dict_complex(self):
x = {b"foo": 1.0 + 1.0j, b"bar": 2.0 + 2.0j}
x_rec = self.encode_decode(x)
assert_array_equal(
sorted(x.values(), key=np.linalg.norm),
sorted(x_rec.values(), key=np.linalg.norm),
)
assert_array_equal(
[type(e) for e in sorted(x.values(), key=np.linalg.norm)],
[type(e) for e in sorted(x_rec.values(), key=np.linalg.norm)],
)
assert_array_equal(sorted(x.keys()), sorted(x_rec.keys()))
assert_array_equal(
[type(e) for e in sorted(x.keys())], [type(e) for e in sorted(x_rec.keys())]
)
def test_dict_str(self):
x = {b"foo": b"xxx", b"bar": b"yyyy"}
x_rec = self.encode_decode(x)
assert_array_equal(sorted(x.values()), sorted(x_rec.values()))
assert_array_equal(
[type(e) for e in sorted(x.values())],
[type(e) for e in sorted(x_rec.values())],
)
assert_array_equal(sorted(x.keys()), sorted(x_rec.keys()))
assert_array_equal(
[type(e) for e in sorted(x.keys())], [type(e) for e in sorted(x_rec.keys())]
)
def test_dict_numpy_float(self):
x = {b"foo": np.float32(1.0), b"bar": np.float32(2.0)}
x_rec = self.encode_decode(x)
assert_array_equal(sorted(x.values()), sorted(x_rec.values()))
assert_array_equal(
[type(e) for e in sorted(x.values())],
[type(e) for e in sorted(x_rec.values())],
)
assert_array_equal(sorted(x.keys()), sorted(x_rec.keys()))
assert_array_equal(
[type(e) for e in sorted(x.keys())], [type(e) for e in sorted(x_rec.keys())]
)
def test_dict_numpy_complex(self):
x = {b"foo": np.complex128(1.0 + 1.0j), b"bar": np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
assert_array_equal(
sorted(x.values(), key=np.linalg.norm),
sorted(x_rec.values(), key=np.linalg.norm),
)
assert_array_equal(
[type(e) for e in sorted(x.values(), key=np.linalg.norm)],
[type(e) for e in sorted(x_rec.values(), key=np.linalg.norm)],
)
assert_array_equal(sorted(x.keys()), sorted(x_rec.keys()))
assert_array_equal(
[type(e) for e in sorted(x.keys())], [type(e) for e in sorted(x_rec.keys())]
)
def test_numpy_array_float(self):
x = np.random.rand(5).astype(np.float32)
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_equal(x.dtype, x_rec.dtype)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_equal(x.dtype, x_rec.dtype)
def test_numpy_array_float_2d(self):
x = np.random.rand(5, 5).astype(np.float32)
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_equal(x.dtype, x_rec.dtype)
def test_numpy_array_str(self):
x = np.array([b"aaa", b"bbbb", b"ccccc"])
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_equal(x.dtype, x_rec.dtype)
def test_numpy_array_mixed(self):
x = np.array(
[(1, 2, b"a", [1.0, 2.0])],
np.dtype(
[
("arg0", np.uint32),
("arg1", np.uint32),
("arg2", "S1"),
("arg3", np.float32, (2,)),
]
),
)
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_equal(x.dtype, x_rec.dtype)
def test_numpy_array_noncontiguous(self):
x = np.ones((10, 10), np.uint32)[0:5, 0:5]
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_equal(x.dtype, x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), b"foo"]
x_rec = self.encode_decode(x)
assert_array_equal(x, x_rec)
assert_array_equal([type(e) for e in x], [type(e) for e in x_rec])
def test_chain(self):
x = ThirdParty(foo=b"test marshal/unmarshal")
x_rec = self.encode_decode_thirdparty(x)
self.assertEqual(x, x_rec)
|
|
#!/usr/bin/env python
"""Test the flow archive."""
import os
import mock
from grr.gui import api_call_handler_utils
from grr.gui import api_call_router_with_approval_checks
from grr.gui import gui_test_lib
from grr.gui import runtests_test
from grr.gui.api_plugins import flow as api_flow
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import flow
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.flows.general import transfer as flows_transfer
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
class TestFlowArchive(gui_test_lib.GRRSeleniumTest):
def setUp(self):
super(TestFlowArchive, self).setUp()
with self.ACLChecksDisabled():
self.client_id = rdf_client.ClientURN("C.0000000000000001")
with aff4.FACTORY.Open(
self.client_id, mode="rw", token=self.token) as client:
client.Set(client.Schema.HOSTNAME("HostC.0000000000000001"))
self.RequestAndGrantClientApproval(self.client_id)
self.action_mock = action_mocks.FileFinderClientMock()
def testDoesNotShowGenerateArchiveButtonForNonExportableRDFValues(self):
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"FlowWithOneNetworkConnectionResult",
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('FlowWithOneNetworkConnectionResult')")
self.Click("link=Results")
self.WaitUntil(self.IsTextPresent, "42")
self.WaitUntilNot(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testDoesNotShowGenerateArchiveButtonWhenResultCollectionIsEmpty(self):
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
gui_test_lib.RecursiveTestFlow.__name__,
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('RecursiveTestFlow')")
self.Click("link=Results")
self.WaitUntil(self.IsTextPresent, "Value")
self.WaitUntilNot(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testShowsGenerateArchiveButtonForGetFileFlow(self):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test.plist"),
pathtype=rdf_paths.PathSpec.PathType.OS)
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
flows_transfer.GetFile.__name__,
self.action_mock,
client_id=self.client_id,
pathspec=pathspec,
token=self.token):
pass
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('GetFile')")
self.Click("link=Results")
self.WaitUntil(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testGenerateArchiveButtonGetsDisabledAfterClick(self):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test.plist"),
pathtype=rdf_paths.PathSpec.PathType.OS)
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
flows_transfer.GetFile.__name__,
self.action_mock,
client_id=self.client_id,
pathspec=pathspec,
token=self.token):
pass
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('GetFile')")
self.Click("link=Results")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsElementPresent, "css=button.DownloadButton[disabled]")
self.WaitUntil(self.IsTextPresent, "Generation has started")
def testShowsErrorMessageIfArchiveStreamingFailsBeforeFirstChunkIsSent(self):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test.plist"),
pathtype=rdf_paths.PathSpec.PathType.OS)
flow_urn = flow.GRRFlow.StartFlow(
flow_name=flows_transfer.GetFile.__name__,
client_id=self.client_id,
pathspec=pathspec,
token=self.token)
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
flow_urn,
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
def RaisingStub(*unused_args, **unused_kwargs):
raise RuntimeError("something went wrong")
with utils.Stubber(api_call_handler_utils.CollectionArchiveGenerator,
"Generate", RaisingStub):
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('GetFile')")
self.Click("link=Results")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsTextPresent,
"Can't generate archive: Unknown error")
self.WaitUntil(self.IsUserNotificationPresent,
"Archive generation failed for flow %s" %
flow_urn.Basename())
@mock.patch.object(api_call_router_with_approval_checks.
ApiCallRouterWithApprovalChecksWithRobotAccess,
"GetExportedFlowResults")
def testClickingOnDownloadAsCsvZipStartsDownload(self, mock_method):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test.plist"),
pathtype=rdf_paths.PathSpec.PathType.OS)
with self.ACLChecksDisabled():
flow_urn = flow.GRRFlow.StartFlow(
flow_name=flows_transfer.GetFile.__name__,
client_id=self.client_id,
pathspec=pathspec,
token=self.token)
for _ in test_lib.TestFlowHelper(
flow_urn,
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
self.Open("/#/clients/C.0000000000000001/flows/%s" % flow_urn.Basename())
self.Click("link=Results")
self.Click("css=grr-download-collection-as button[name='csv-zip']")
def MockMethodIsCalled():
try:
mock_method.assert_called_once_with(
api_flow.ApiGetExportedFlowResultsArgs(
client_id=self.client_id.Basename(),
flow_id=flow_urn.Basename(),
plugin_name="csv-zip"),
token=mock.ANY)
return True
except AssertionError:
return False
self.WaitUntil(MockMethodIsCalled)
def testDoesNotShowDownloadAsPanelIfCollectionIsEmpty(self):
with self.ACLChecksDisabled():
flow_urn = flow.GRRFlow.StartFlow(
flow_name=gui_test_lib.RecursiveTestFlow.__name__,
client_id=self.client_id,
token=self.token)
for _ in test_lib.TestFlowHelper(
flow_urn,
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
self.Open("/#/clients/C.0000000000000001/flows/%s" % flow_urn.Basename())
self.Click("link=Results")
self.WaitUntil(self.IsTextPresent, "Value")
self.WaitUntilNot(self.IsElementPresent, "grr-download-collection-as")
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
from StringIO import StringIO
from collections import namedtuple
from contextlib import closing
import logging
import pprint
import sys
import traceback
from pyutilib.component.core import ExtensionPoint, PluginGlobals
# TODO Update plugins to import from `pyutilib.component.core` directly
# instead of importing from here.
from pyutilib.component.core import Plugin, SingletonPlugin, implements
import path_helpers as ph
import task_scheduler
from .interfaces import IPlugin, IWaveformGenerator, ILoggingPlugin
from logging_helpers import _L, caller_name #: .. versionadded:: 2.20
logger = logging.getLogger(__name__)
ScheduleRequest = namedtuple('ScheduleRequest', 'before after')
def load_plugins(plugins_dir='plugins', import_from_parent=True):
'''
Import each Python plugin module in the specified directory and create an
instance of each contained plugin class for which an instance has not yet
been created.
Parameters
----------
plugins_dir : str
Directory containing zero or more Python plugin modules to import.
import_from_parent : bool
Add parent of specified directory to system path and import
``<parent>.<module>``.
.. note::
**Not recommended**, but kept as default to maintain legacy
protocol compatibility.
Returns
-------
list
Newly created plugins (plugins are not recreated if they were
previously loaded).
.. versionchanged:: 2.25
Do not import hidden directories (i.e., name starts with ``.``).
'''
logger = _L() # use logger with function context
logger.info('plugins_dir=`%s`', plugins_dir)
plugins_dir = ph.path(plugins_dir).realpath()
logger.info('Loading plugins:')
plugins_root = plugins_dir.parent if import_from_parent else plugins_dir
if plugins_root not in sys.path:
sys.path.insert(0, plugins_root)
# Create an instance of each of the plugins, but set it to disabled
e = PluginGlobals.env('microdrop.managed')
initial_plugins = set(e.plugin_registry.values())
imported_plugins = set()
for package_i in plugins_dir.dirs():
if package_i.isjunction() and not package_i.readlink().isdir():
# Plugin directory is a junction/link to a non-existent target
# path.
logger.info('Skip import of `%s` (broken link to `%s`).',
package_i.name, package_i.readlink())
continue
elif package_i.name in (p.__module__ for p in initial_plugins):
# Plugin with the same name has already been imported.
logger.info('Skip import of `%s` (plugin with same name has '
'already been imported).', package_i.name)
continue
elif package_i.name.startswith('.'):
logger.info('Skip import of hidden directory `%s`.',
package_i.name)
continue
try:
plugin_module = package_i.name
if import_from_parent:
plugin_module = '.'.join([plugins_dir.name, plugin_module])
import_statement = 'import {}'.format(plugin_module)
logger.debug(import_statement)
exec(import_statement)
all_plugins = set(e.plugin_registry.values())
current_plugin = list(all_plugins - initial_plugins -
imported_plugins)[0]
logger.info('\t Imported: %s (%s)', current_plugin.__name__,
package_i)
imported_plugins.add(current_plugin)
except Exception:
map(logger.info, traceback.format_exc().splitlines())
logger.error('Error loading %s plugin.', package_i.name,
exc_info=True)
# For each newly imported plugin class, create a service instance
# initialized to the disabled state.
new_plugins = []
for class_ in imported_plugins:
service = class_()
service.disable()
new_plugins.append(service)
logger.debug('\t Created new plugin services: %s',
','.join([p.__class__.__name__ for p in new_plugins]))
return new_plugins
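# Hedged usage sketch (the directory name is illustrative; any directory laid out as
# described in the docstring works):
#
#     new_services = load_plugins(plugins_dir='plugins', import_from_parent=False)
#     for service in new_services:
#         _L().info('loaded %s (enabled=%s)', service.name, service.enabled())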
def log_summary():
'''
Dump summary of plugins to log.
'''
observers = ExtensionPoint(IPlugin)
logging.info('Registered plugins:')
for observer in observers:
logging.info('\t %s' % observer)
observers = ExtensionPoint(IWaveformGenerator)
logging.info('Registered function generator plugins:')
for observer in observers:
logging.info('\t %s' % observer)
observers = ExtensionPoint(ILoggingPlugin)
logging.info('Registered logging plugins:')
for observer in observers:
logging.info('\t %s' % observer)
def get_plugin_names(env=None):
'''
Parameters
----------
env : str, optional
Name of ``pyutilib.component.core`` plugin environment (e.g.,
``'microdrop.managed'``).
Returns
-------
list(str)
List of plugin names (e.g., ``['StepLabelPlugin', ...]``).
'''
if env is None:
env = 'pca'
e = PluginGlobals.env(env)
return list(e.plugin_registry.keys())
def get_service_class(name, env='microdrop.managed'):
'''
Parameters
----------
name : str
Plugin class name (e.g., ``App``).
env : str, optional
Name of ``pyutilib.component.core`` plugin environment (e.g.,
``'microdrop.managed'``).
Returns
-------
class
Class type matching specified plugin class name.
.. note::
Returns actual class type -- **not** an instance of the plugin
service.
'''
e = PluginGlobals.env(env)
if name not in e.plugin_registry:
raise KeyError('No plugin registered with name: %s' % name)
return e.plugin_registry[name]
def get_service_instance_by_name(name, env='microdrop.managed'):
'''
Parameters
----------
name : str
Plugin name (e.g., ``microdrop.zmq_hub_plugin``).
Corresponds to ``plugin_name`` key in plugin ``properties.yml`` file.
env : str, optional
Name of ``pyutilib.component.core`` plugin environment (e.g.,
``'microdrop.managed'``).
Returns
-------
object
Active service instance matching specified plugin name.
Raises
------
KeyError
If no plugin is found registered with the specified name.
'''
e = PluginGlobals.env(env)
plugins = [p for i, p in enumerate(e.services) if name == p.name]
if plugins:
return plugins[0]
else:
raise KeyError('No plugin registered with name: %s' % name)
def get_service_instance_by_package_name(name, env='microdrop.managed'):
'''
Parameters
----------
name : str
Plugin Python module name (e.g., ``dmf_control_board_plugin``).
Corresponds to ``package_name`` key in plugin ``properties.yml`` file.
env : str, optional
Name of ``pyutilib.component.core`` plugin environment (e.g.,
``'microdrop.managed'``).
Returns
-------
object
Active service instance matching specified plugin module name.
'''
e = PluginGlobals.env(env)
plugins = [p for i, p in enumerate(e.services)
if name == get_plugin_package_name(p.__class__.__module__)]
if plugins:
return plugins[0]
else:
raise KeyError('No plugin registered with package name: %s' % name)
def get_plugin_package_name(module_name):
'''
Parameters
----------
module_name : str
Fully-qualified class name (e.g.,
``'plugins.dmf_control_board_plugin'``).
Returns
-------
str
Relative module name (e.g., ``'dmf_control_board_plugin'``)
'''
return module_name.split('.')[-1]
def get_service_instance(class_, env='microdrop.managed'):
'''
Parameters
----------
class_ : class
Plugin class type.
env : str, optional
Name of ``pyutilib.component.core`` plugin environment (e.g.,
``'microdrop.managed'``).
Returns
-------
object or None
Registered service instance for the specified plugin class type.
Returns ``None`` if no service is registered for the specified plugin
class type.
'''
e = PluginGlobals.env(env)
for service in e.services:
if isinstance(service, class_):
# A plugin of this type is registered
return service
return None
def get_service_names(env='microdrop.managed'):
'''
Parameters
----------
env : str, optional
Name of ``pyutilib.component.core`` plugin environment (e.g.,
``'microdrop.managed'``).
Returns
-------
list
List of plugin names (e.g., ``['microdrop.step_label_plugin', ...]``).
'''
e = PluginGlobals.env(env)
service_names = []
for name in get_plugin_names(env):
plugin_class = e.plugin_registry[name]
service = get_service_instance(plugin_class, env=env)
if service is None:
_L().warn('Plugin `%s` exists in registry, but instance cannot '
'be found.', name)
else:
service_names.append(service.name)
return service_names
def get_schedule(observers, function):
'''
Generate observer order based on scheduling requests for specified
function.
Parameters
----------
observers : dict
Mapping from service names to service instances.
function : str
Name of function to generate schedule for.
Returns
-------
list
List of observer service names in scheduled order.
'''
logger = _L() # use logger with function context
# Query plugins for schedule requests for 'function'
schedule_requests = {}
for observer in observers.values():
if hasattr(observer, 'get_schedule_requests'):
schedule_requests[observer.name] =\
observer.get_schedule_requests(function)
if schedule_requests:
scheduler = task_scheduler.TaskScheduler(observers.keys())
for request in [r for name, requests in schedule_requests.items()
for r in requests]:
try:
scheduler.request_order(*request)
except AssertionError:
logger.debug('Schedule requests for `%s`', function)
map(logger.debug,
pprint.pformat(schedule_requests).splitlines())
logger.info('emit_signal(%s) could not add schedule request '
'%s', function, request)
continue
return scheduler.get_schedule()
else:
return observers.keys()
def get_observers(function, interface=IPlugin):
'''
Get dictionary of observers implementing the specified function.
Parameters
----------
function : str
Name of function to generate schedule for.
interface : class, optional
Plugin interface class.
Returns
-------
dict
Mapping from service names to service instances.
'''
observers = {}
for obs in ExtensionPoint(interface):
if hasattr(obs, function):
observers[obs.name] = obs
return observers
def emit_signal(function, args=None, interface=IPlugin):
'''
Call specified function on each enabled plugin implementing the function
and collect results.
Parameters
----------
function : str
Name of function to generate schedule for.
interface : class, optional
Plugin interface class.
Returns
-------
dict
Mapping from each service name to the respective function return value.
.. versionchanged:: 2.20
Log caller at info level, and log args and observers at debug level.
'''
logger = _L() # use logger with function context
i = 0
caller = caller_name(skip=i)
while not caller or caller == 'microdrop.plugin_manager.emit_signal':
i += 1
caller = caller_name(skip=i)
try:
observers = get_observers(function, interface)
schedule = get_schedule(observers, function)
return_codes = {}
if args is None:
args = []
elif not isinstance(args, list):
args = [args]
if not any((name in caller) for name in ('logger', 'emit_signal')):
logger.debug('caller: %s -> %s', caller, function)
if logger.getEffectiveLevel() <= logging.DEBUG:
logger.debug('args: (%s)', ', '.join(map(repr, args)))
for observer_name in schedule:
observer = observers[observer_name]
try:
f = getattr(observer, function)
logger.debug(' call: %s.%s(...)', observer.name, function)
return_codes[observer.name] = f(*args)
except Exception, why:
with closing(StringIO()) as message:
if hasattr(observer, "name"):
if interface == ILoggingPlugin:
# If this is a logging plugin, do not try to log
# since that will result in infinite recursion.
# Instead, just continue onto the next plugin.
continue
print >> message, \
'%s plugin crashed processing %s signal.' % \
(observer.name, function)
print >> message, 'Reason:', str(why)
logger.error(message.getvalue().strip())
map(logger.info, traceback.format_exc().splitlines())
return return_codes
except Exception, why:
logger.error(why, exc_info=True)
return {}
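# Hedged usage sketch (the signal name and arguments mirror the call made in
# enable() further down; any other plugin hook works the same way):
#
#     results = emit_signal('on_plugin_enabled', ['microdrop.managed', service])
#
# `results` maps each responding plugin's service name to whatever its handler
# returned.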
def enable(name, env='microdrop.managed'):
'''
Enable specified plugin.
Parameters
----------
name : str
Plugin name (e.g., ``microdrop.zmq_hub_plugin``).
Corresponds to ``plugin_name`` key in plugin ``properties.yml`` file.
env : str, optional
Name of ``pyutilib.component.core`` plugin environment (e.g.,
``'microdrop.managed'``).
'''
service = get_service_instance_by_name(name, env)
if not service.enabled():
service.enable()
_L().info('[PluginManager] Enabled plugin: %s', name)
if hasattr(service, "on_plugin_enable"):
service.on_plugin_enable()
emit_signal('on_plugin_enabled', [env, service])
def disable(name, env='microdrop.managed'):
'''
Disable specified plugin.
Parameters
----------
name : str
Plugin name (e.g., ``microdrop.zmq_hub_plugin``).
Corresponds to ``plugin_name`` key in plugin ``properties.yml`` file.
env : str, optional
Name of ``pyutilib.component.core`` plugin environment (e.g.,
``'microdrop.managed'``).
'''
service = get_service_instance_by_name(name, env)
if service and service.enabled():
service.disable()
if hasattr(service, "on_plugin_disable"):
service.on_plugin_disable()
emit_signal('on_plugin_disabled', [env, service])
logging.info('[PluginManager] Disabled plugin: %s' % name)
def connect_pyutilib_signal(signals, signal, *args, **kwargs):
'''
Connect pyutilib callbacks to corresponding signal in blinker namespace.
Allows code to be written using blinker signals for easier testing outside
of MicroDrop, while maintaining compatibility with pyutilib.
Parameters
----------
signals : blinker.Namespace
signal : str
Pyutilib signal name.
*args, **kwargs
Arguments passed to `pyutilib.component.core.ExtensionPoint()`
Example
-------
>>> from microdrop.interfaces import IPlugin
>>> import microdrop.app
>>> import blinker
>>>
>>> signals = blinker.Namespace()
>>> signal = 'get_schedule_requests'
>>> args = ('on_plugin_enable', )
>>> connect_pyutilib_signal(signals, signal, IPlugin)
>>> signals.signal(signal).send(*args)
[(<bound method DmfDeviceController.get_schedule_requests of <Plugin DmfDeviceController 'microdrop.gui.dmf_device_controller'>>, [ScheduleRequest(before='microdrop.gui.config_controller', after='microdrop.gui.dmf_device_controller'), ScheduleRequest(before='microdrop.gui.main_window_controller', after='microdrop.gui.dmf_device_controller')])]
'''
import functools as ft
import inspect
from microdrop.plugin_manager import ExtensionPoint
callbacks = [getattr(p, signal) for p in ExtensionPoint(*args, **kwargs) if hasattr(p, signal)]
for callback_i in callbacks:
if len(inspect.getargspec(callback_i)[0]) < 2:
# Blinker signals require _at least_ one argument (assumed to be sender).
# Wrap pyutilib signals without any arguments to make them work with
# blinker; bind each callback explicitly so the wrapper does not pick up
# the loop variable through late binding.
def _make_wrapper(cb):
@ft.wraps(cb)
def _callback_wrapper(*args, **kwargs):
return cb()
return _callback_wrapper
_callback = _make_wrapper(callback_i)
else:
_callback = callback_i
signals.signal(signal).connect(_callback, weak=False)
PluginGlobals.pop_env()
|
|
# Copyright (c) 2015, University of Memphis, MD2K Center of Excellence
# - Timothy Hnat <[email protected]>
# - Karen Hovsepian <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import json
import numpy as np
from collections import Counter
from collections import Sized
from pathlib import Path
from pprint import pprint
from sklearn import svm, metrics, preprocessing
from sklearn.base import clone, is_classifier
from sklearn.cross_validation import LabelKFold
from sklearn.cross_validation import check_cv
from sklearn.externals.joblib import Parallel, delayed
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV, ParameterSampler, ParameterGrid
from sklearn.utils.validation import _num_samples, indexable
# Command line parameter configuration
parser = argparse.ArgumentParser(description='Train and evaluate the cStress model')
parser.add_argument('--featureFolder', dest='featureFolder', required=True,
help='Directory containing feature files')
parser.add_argument('--scorer', type=str, required=True, dest='scorer',
help='Specify which scorer function to use (f1 or twobias)')
parser.add_argument('--whichsearch', type=str, required=True, dest='whichsearch',
help='Specify which search function to use (GridSearch or RandomizedSearch)')
parser.add_argument('--n_iter', type=int, required=False, dest='n_iter',
help='If Randomized Search is used, how many iterations to use')
parser.add_argument('--modelOutput', type=str, required=True, dest='modelOutput',
help='Model file to write')
parser.add_argument('--featureFile', type=str, required=True, dest='featureFile',
help='Feature vector file name')
parser.add_argument('--stressFile', type=str, required=True, dest='stressFile',
help='Stress ground truth filename')
args = parser.parse_args()
def cv_fit_and_score(estimator, X, y, scorer, parameters, cv):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
parameters : dict or None
Parameters to be set on the estimator.
cv : iterable
Cross-validation (train, test) fold indices.
Returns
-------
score : float
CV score on whole set.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
estimator.set_params(**parameters)
cv_probs_ = cross_val_probs(estimator, X, y, cv)
score = scorer(cv_probs_, y)
return [score, parameters] # scoring_time]
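# Scores one parameter setting on the pooled out-of-fold probabilities produced by
# cross_val_probs() below, yielding a single score per candidate; the
# Modified*SearchCV classes sort these [score, parameters] pairs to pick the best.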
class ModifiedGridSearchCV(GridSearchCV):
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(ModifiedGridSearchCV, self).__init__(
estimator, param_grid, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
def fit(self, X, y):
"""Actual fitting, performing the search over parameters."""
parameter_iterable = ParameterGrid(self.param_grid)
estimator = self.estimator
cv = self.cv
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(cv_fit_and_score)(clone(base_estimator), X, y, self.scoring,
parameters, cv=cv)
for parameters in parameter_iterable)
best = sorted(out, reverse=True)[0]
self.best_params_ = best[1]
self.best_score_ = best[0]
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best[1])
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class ModifiedRandomizedSearchCV(RandomizedSearchCV):
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
super(ModifiedRandomizedSearchCV, self).__init__(estimator=estimator, param_distributions=param_distributions,
n_iter=n_iter, scoring=scoring, random_state=random_state,
fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit,
cv=cv, verbose=verbose, pre_dispatch=pre_dispatch,
error_score=error_score)
def fit(self, X, y):
"""Actual fitting, performing the search over parameters."""
parameter_iterable = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
estimator = self.estimator
cv = self.cv
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(cv_fit_and_score)(clone(base_estimator), X, y, self.scoring,
parameters, cv=cv)
for parameters in parameter_iterable)
best = sorted(out, reverse=True)[0]
self.best_params_ = best[1]
self.best_score_ = best[0]
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best[1])
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
def decodeLabel(label):
label = label[:2] # Only the first 2 characters designate the label code
mapping = {'c1': 0, 'c2': 1, 'c3': 1, 'c4': 0, 'c5': 0, 'c6': 0, 'c7': 2, }
return mapping[label]
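# The label codes collapse to three classes: c2/c3 -> 1 (stressed), c1/c4/c5/c6 -> 0
# (not stressed), c7 -> 2 (filtered out by checkStressMark before decoding). Only
# the first two characters matter, so e.g. decodeLabel('c3xyz') returns 1.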
def readFeatures(folder, filename):
features = []
path = Path(folder)
files = list(path.glob('**/' + filename))
for f in files:
participantID = int(f.parent.name[2:])
with f.open() as file:
for line in file.readlines():
parts = [x.strip() for x in line.split(',')]
featureVector = [participantID, int(parts[0])]
featureVector.extend([float(p) for p in parts[1:]])
features.append(featureVector)
return features
def readStressmarks(folder, filename):
features = []
path = Path(folder)
files = list(path.glob('**/' + filename))
for f in files:
participantID = int(f.parent.name[2:])
with f.open() as file:
for line in file.readlines():
parts = [x.strip() for x in line.split(',')]
label = parts[0][:2]
features.append([participantID, label, int(parts[2]), int(parts[3])])
return features
def checkStressMark(stressMark, pid, starttime):
endtime = starttime + 60000 # One minute windows
result = []
for line in stressMark:
[id, gt, st, et] = line
if id == pid and (gt not in ['c7']):
if (starttime > st) and (endtime < et):
result.append(gt)
data = Counter(result)
return data.most_common(1)
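# Returns a Counter.most_common(1)-style list such as [('c2', 3)]: the ground-truth
# code whose interval most often fully covers the one-minute window starting at
# `starttime` for participant `pid`, or [] when no non-c7 mark spans that window.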
def analyze_events_with_features(features, stress_marks):
featureLabels = []
finalFeatures = []
subjects = []
startTimes = {}
for pid, label, start, end in stress_marks:
if label == 'c4':
if pid not in startTimes:
startTimes[pid] = np.inf
startTimes[pid] = min(startTimes[pid], start)
for line in features:
id = line[0]
ts = line[1]
f = line[2:]
if ts < startTimes[id]:
continue # Outside of starting time
label = checkStressMark(stress_marks, id, ts)
if len(label) > 0:
stressClass = decodeLabel(label[0][0])
featureLabels.append(stressClass)
finalFeatures.append(f)
subjects.append(id)
return finalFeatures, featureLabels, subjects
def get_svmdataset(traindata, trainlabels):
input = []
output = []
foldinds = []
for i in range(len(trainlabels)):
if trainlabels[i] == 1:
foldinds.append(i)
if trainlabels[i] == 0:
foldinds.append(i)
input = np.array(input, dtype='float64')
return output, input, foldinds
def reduceData(data, r):
result = []
for d in data:
result.append([d[i] for i in r])
return result
def f1Bias_scorer(estimator, X, y, ret_bias=False):
probas_ = estimator.predict_proba(X)
precision, recall, thresholds = metrics.precision_recall_curve(y, probas_[:, 1])
f1 = 0.0
bias = 0.0 # default so ret_bias never hits an unbound local
for i in range(0, len(thresholds)):
if not (precision[i] == 0 and recall[i] == 0):
f = 2 * (precision[i] * recall[i]) / (precision[i] + recall[i])
if f > f1:
f1 = f
bias = thresholds[i]
if ret_bias:
return f1, bias
else:
return f1
def Twobias_scorer_CV(probs, y, ret_bias=False):
db = np.transpose(np.vstack([probs, y]))
db = db[np.argsort(db[:, 0]), :]
pos = np.sum(y == 1)
n = len(y)
neg = n - pos
tp, tn = pos, 0
lost = 0
optbias = []
minloss = 1
for i in range(n):
# p = db[i,1]
if db[i, 1] == 1: # positive
tp -= 1.0
else:
tn += 1.0
# v1 = tp/pos
# v2 = tn/neg
if tp / pos >= 0.95 and tn / neg >= 0.95:
optbias = [db[i, 0], db[i, 0]]
continue
running_pos = pos
running_neg = neg
running_tp = tp
running_tn = tn
for j in range(i + 1, n):
# p1 = db[j,1]
if db[j, 1] == 1: # positive
running_tp -= 1.0
running_pos -= 1
else:
running_neg -= 1
lost = (j - i) * 1.0 / n
if running_pos == 0 or running_neg == 0:
break
# v1 = running_tp/running_pos
# v2 = running_tn/running_neg
if running_tp / running_pos >= 0.95 and running_tn / running_neg >= 0.95 and lost < minloss:
minloss = lost
optbias = [db[i, 0], db[j, 0]]
if ret_bias:
return -minloss, optbias
else:
return -minloss
def f1Bias_scorer_CV(probs, y, ret_bias=False):
precision, recall, thresholds = metrics.precision_recall_curve(y, probs)
f1 = 0.0
bias = 0.0 # default so ret_bias never hits an unbound local
for i in range(0, len(thresholds)):
if not (precision[i] == 0 and recall[i] == 0):
f = 2 * (precision[i] * recall[i]) / (precision[i] + recall[i])
if f > f1:
f1 = f
bias = thresholds[i]
if ret_bias:
return f1, bias
else:
return f1
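# Hedged toy example (numbers are illustrative only): with probs = [0.1, 0.4, 0.8,
# 0.9] and y = [0, 0, 1, 1], the precision/recall sweep peaks at F1 = 1.0 at the
# 0.8 threshold, so f1Bias_scorer_CV(np.array(probs), np.array(y), ret_bias=True)
# should return (1.0, 0.8).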
def svmOutput(filename, traindata, trainlabels):
with open(filename, 'w') as f:
for i in range(0, len(trainlabels)):
f.write(str(trainlabels[i]))
for fi in range(0, len(traindata[i])):
f.write(" " + str(fi + 1) + ":" + str(traindata[i][fi]))
f.write("\n")
def saveModel(filename, model, normparams, bias=0.5):
class Object:
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class Kernel(Object):
def __init__(self, type, parameters):
self.type = type
self.parameters = parameters
class KernelParam(Object):
def __init__(self, name, value):
self.name = name
self.value = value
class Support(Object):
def __init__(self, dualCoef, supportVector):
self.dualCoef = dualCoef
self.supportVector = supportVector
class NormParam(Object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
class SVCModel(Object):
def __init__(self, modelName, modelType, intercept, bias, probA, probB, kernel, support, normparams):
self.modelName = modelName
self.modelType = modelType
self.intercept = intercept
self.bias = bias
self.probA = probA
self.probB = probB
self.kernel = kernel
self.support = support
self.normparams = normparams
model = SVCModel('cStress', 'svc', model.intercept_[0], bias, model.probA_[0], model.probB_[0],
Kernel('rbf', [KernelParam('gamma', model._gamma)]),
[Support(model.dual_coef_[0][i], list(model.support_vectors_[i])) for i in
range(len(model.dual_coef_[0]))],
[NormParam(normparams.mean_[i], normparams.scale_[i]) for i in range(len(normparams.scale_))])
with open(filename, 'w') as f:
print >> f, model.to_JSON()
def cross_val_probs(estimator, X, y, cv):
probs = np.zeros(len(y))
for train, test in cv:
temp = estimator.fit(X[train], y[train]).predict_proba(X[test])
probs[test] = temp[:, 1]
return probs
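# Stitches the positive-class probabilities predicted on each held-out fold into a
# single vector aligned with y, so the scorers above evaluate one pooled
# cross-validated prediction per sample.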
# This tool accepts the data produced by the Java cStress implementation and trains and evaluates an SVM model with
# cross-subject validation
if __name__ == '__main__':
features = readFeatures(args.featureFolder, args.featureFile)
groundtruth = readStressmarks(args.featureFolder, args.stressFile)
traindata, trainlabels, subjects = analyze_events_with_features(features, groundtruth)
traindata = np.asarray(traindata, dtype=np.float64)
trainlabels = np.asarray(trainlabels)
normalizer = preprocessing.StandardScaler()
traindata = normalizer.fit_transform(traindata)
lkf = LabelKFold(subjects, n_folds=len(np.unique(subjects)))
delta = 0.1
parameters = {'kernel': ['rbf'],
'C': [2 ** x for x in np.arange(-12, 12, 0.5)],
'gamma': [2 ** x for x in np.arange(-12, 12, 0.5)],
'class_weight': [{0: w, 1: 1 - w} for w in np.arange(0.0, 1.0, delta)]}
svc = svm.SVC(probability=True, verbose=False, cache_size=2000)
if args.scorer == 'f1':
scorer = f1Bias_scorer_CV
else:
scorer = Twobias_scorer_CV
if args.whichsearch == 'grid':
clf = ModifiedGridSearchCV(svc, parameters, cv=lkf, n_jobs=-1, scoring=scorer, verbose=1, iid=False)
else:
clf = ModifiedRandomizedSearchCV(estimator=svc, param_distributions=parameters, cv=lkf, n_jobs=-1,
scoring=scorer, n_iter=args.n_iter,
verbose=1, iid=False)
clf.fit(traindata, trainlabels)
pprint(clf.best_params_)
CV_probs = cross_val_probs(clf.best_estimator_, traindata, trainlabels, lkf)
score, bias = scorer(CV_probs, trainlabels, True)
print score, bias
if not bias == []:
saveModel(args.modelOutput, clf.best_estimator_, normalizer, bias)
n = len(trainlabels)
if args.scorer == 'f1':
predicted = np.asarray(CV_probs >= bias, dtype=np.int)
classified = range(n)
else:
classified = np.where(np.logical_or(CV_probs <= bias[0], CV_probs >= bias[1]))[0]
predicted = np.asarray(CV_probs[classified] >= bias[1], dtype=np.int)
print("Cross-Subject (" + str(len(np.unique(subjects))) + "-fold) Validation Prediction")
print("Accuracy: " + str(metrics.accuracy_score(trainlabels[classified], predicted)))
print(metrics.classification_report(trainlabels[classified], predicted))
print(metrics.confusion_matrix(trainlabels[classified], predicted))
print("Lost: %d (%f%%)" % (n - len(classified), (n - len(classified)) * 1.0 / n))
print("Subjects: " + str(np.unique(subjects)))
else:
print "Results not good"
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pex.interpreter import PythonIdentity, PythonInterpreter
from pex.package import EggPackage, Package, SourcePackage
from pex.resolver import resolve
from twitter.common.collections import OrderedSet
from pants.backend.python.targets.python_target import PythonTarget
from pants.base.exceptions import TaskError
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir
# TODO(wickman) Create a safer version of this and add to twitter.common.dirutil
def _safe_link(src, dst):
try:
os.unlink(dst)
except OSError:
pass
os.symlink(src, dst)
class PythonInterpreterCache(object):
@staticmethod
def _matches(interpreter, filters):
return any(interpreter.identity.matches(filt) for filt in filters)
@classmethod
def _matching(cls, interpreters, filters):
for interpreter in interpreters:
if cls._matches(interpreter, filters):
yield interpreter
@classmethod
def select_interpreter(cls, compatibilities, allow_multiple=False):
"""Given a set of interpreters, either return them all if ``allow_multiple`` is ``True``;
otherwise, return the lowest compatible interpreter.
"""
if allow_multiple:
return compatibilities
return [min(compatibilities)] if compatibilities else []
def __init__(self, python_setup, python_repos, logger=None):
self._python_setup = python_setup
self._python_repos = python_repos
self._cache_dir = python_setup.interpreter_cache_dir
safe_mkdir(self._cache_dir)
self._interpreters = set()
self._logger = logger or (lambda msg: True)
self._default_filters = (python_setup.interpreter_requirement or b'',)
@property
def interpreters(self):
"""Returns the set of cached interpreters."""
return self._interpreters
def select_interpreter_for_targets(self, targets):
"""Pick an interpreter compatible with all the specified targets."""
allowed_interpreters = OrderedSet(self.interpreters)
tgts_with_compatibilities = [] # Used only for error messages.
# Constrain allowed_interpreters based on each target's compatibility requirements.
for target in targets:
if isinstance(target, PythonTarget) and target.compatibility:
tgts_with_compatibilities.append(target)
compatible_with_target = list(self.matched_interpreters(target.compatibility))
allowed_interpreters &= compatible_with_target
if not allowed_interpreters:
# Create a helpful error message.
unique_compatibilities = set(tuple(t.compatibility) for t in tgts_with_compatibilities)
unique_compatibilities_strs = [','.join(x) for x in unique_compatibilities if x]
tgts_with_compatibilities_strs = [t.address.spec for t in tgts_with_compatibilities]
raise TaskError('Unable to detect a suitable interpreter for compatibilities: {} '
'(Conflicting targets: {})'.format(' && '.join(unique_compatibilities_strs),
', '.join(tgts_with_compatibilities_strs)))
# Return the lowest compatible interpreter.
return self.select_interpreter(allowed_interpreters)[0]
def _interpreter_from_path(self, path, filters):
interpreter_dir = os.path.basename(path)
identity = PythonIdentity.from_path(interpreter_dir)
try:
executable = os.readlink(os.path.join(path, 'python'))
except OSError:
return None
interpreter = PythonInterpreter(executable, identity)
if self._matches(interpreter, filters):
return self._resolve(interpreter)
return None
def _setup_interpreter(self, interpreter, cache_target_path):
with safe_concurrent_creation(cache_target_path) as safe_path:
os.mkdir(safe_path) # Parent will already have been created by safe_concurrent_creation.
os.symlink(interpreter.binary, os.path.join(safe_path, 'python'))
return self._resolve(interpreter, safe_path)
def _setup_cached(self, filters):
"""Find all currently-cached interpreters."""
for interpreter_dir in os.listdir(self._cache_dir):
path = os.path.join(self._cache_dir, interpreter_dir)
pi = self._interpreter_from_path(path, filters)
if pi:
self._logger('Detected interpreter {}: {}'.format(pi.binary, str(pi.identity)))
self._interpreters.add(pi)
def _setup_paths(self, paths, filters):
"""Find interpreters under paths, and cache them."""
for interpreter in self._matching(PythonInterpreter.all(paths), filters):
identity_str = str(interpreter.identity)
cache_path = os.path.join(self._cache_dir, identity_str)
pi = self._interpreter_from_path(cache_path, filters)
if pi is None:
self._setup_interpreter(interpreter, cache_path)
pi = self._interpreter_from_path(cache_path, filters)
if pi is None:
continue
self._interpreters.add(pi)
def matched_interpreters(self, filters):
"""Given some filters, yield any interpreter that matches at least one of them.
:param filters: A sequence of strings that constrain the interpreter compatibility for this
cache, using the Requirement-style format, e.g. ``'CPython>=3', or just ['>=2.7','<3']``
for requirements agnostic to interpreter class.
"""
for match in self._matching(self.interpreters, filters):
yield match
def setup(self, paths=(), force=False, filters=(b'',)):
"""Sets up a cache of python interpreters.
NB: Must be called prior to accessing the ``interpreters`` property or the ``matches`` method.
:param paths: The paths to search for a python interpreter; the system ``PATH`` by default.
:param bool force: When ``True`` the interpreter cache is always re-built.
:param filters: A sequence of strings that constrain the interpreter compatibility for this
cache, using the Requirement-style format, e.g. ``'CPython>=3', or just ['>=2.7','<3']``
for requirements agnostic to interpreter class.
"""
filters = self._default_filters if not any(filters) else filters
setup_paths = paths or os.getenv('PATH').split(os.pathsep)
self._setup_cached(filters)
def unsatisfied_filters():
return filter(lambda f: len(list(self._matching(self.interpreters, [f]))) == 0, filters)
if force or len(unsatisfied_filters()) > 0:
self._setup_paths(setup_paths, filters)
for filt in unsatisfied_filters():
self._logger('No valid interpreters found for {}!'.format(filt))
matches = list(self.matched_interpreters(filters))
if len(matches) == 0:
self._logger('Found no valid interpreters!')
return matches
def _resolve(self, interpreter, interpreter_dir=None):
"""Resolve and cache an interpreter with a setuptools and wheel capability."""
interpreter = self._resolve_interpreter(interpreter, interpreter_dir,
self._python_setup.setuptools_requirement())
if interpreter:
return self._resolve_interpreter(interpreter, interpreter_dir,
self._python_setup.wheel_requirement())
def _resolve_interpreter(self, interpreter, interpreter_dir, requirement):
"""Given a :class:`PythonInterpreter` and a requirement, return an interpreter with the
capability of resolving that requirement or ``None`` if it's not possible to install a
suitable requirement.
If interpreter_dir is unspecified, operates on the default location.
"""
if interpreter.satisfies([requirement]):
return interpreter
if not interpreter_dir:
interpreter_dir = os.path.join(self._cache_dir, str(interpreter.identity))
target_link = os.path.join(interpreter_dir, requirement.key)
bdist = self._resolve_and_link(interpreter, requirement, target_link)
if bdist:
return interpreter.with_extra(bdist.name, bdist.raw_version, bdist.path)
else:
self._logger('Failed to resolve requirement {} for {}'.format(requirement, interpreter))
def _resolve_and_link(self, interpreter, requirement, target_link):
# Short-circuit if there is a local copy.
if os.path.exists(target_link) and os.path.exists(os.path.realpath(target_link)):
bdist = Package.from_href(os.path.realpath(target_link))
if bdist.satisfies(requirement):
return bdist
# Since we're resolving to bootstrap a bare interpreter, we won't have wheel available.
# Explicitly set the precedence to avoid resolution of wheels or distillation of sdists into
# wheels.
precedence = (EggPackage, SourcePackage)
distributions = resolve(requirements=[requirement],
fetchers=self._python_repos.get_fetchers(),
interpreter=interpreter,
context=self._python_repos.get_network_context(),
precedence=precedence)
if not distributions:
return None
assert len(distributions) == 1, ('Expected exactly 1 distribution to be resolved for {}, '
'found:\n\t{}'.format(requirement,
'\n\t'.join(map(str, distributions))))
dist_location = distributions[0].location
target_location = os.path.join(os.path.dirname(target_link), os.path.basename(dist_location))
shutil.move(dist_location, target_location)
_safe_link(target_location, target_link)
self._logger(' installed {}'.format(target_location))
return Package.from_href(target_location)
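# Hedged usage sketch (python_setup, python_repos and python_targets stand in for
# the objects a Pants task would supply; the names are illustrative):
#
#     cache = PythonInterpreterCache(python_setup, python_repos, logger=log.debug)
#     matches = cache.setup(filters=('CPython>=2.7,<3',))
#     interpreter = cache.select_interpreter_for_targets(python_targets)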
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import operator
import time
import unittest
import socket
import os
import errno
import itertools
import random
from collections import defaultdict
from datetime import datetime
from six.moves import urllib
from swift.container import reconciler
from swift.container.server import gen_resp_headers
from swift.common.direct_client import ClientException
from swift.common import swob
from swift.common.utils import split_path, Timestamp
from test.unit import debug_logger, FakeRing, fake_http_connect
from test.unit.common.middleware.helpers import FakeSwift
def timestamp_to_last_modified(timestamp):
return datetime.fromtimestamp(
float(Timestamp(timestamp))).strftime('%Y-%m-%dT%H:%M:%S.%f')
def container_resp_headers(**kwargs):
return swob.HeaderKeyDict(gen_resp_headers(kwargs))
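# FakeStoragePolicySwift wraps one FakeSwift app per storage policy index
# (created lazily via the defaultdict) and routes each request to the app for
# the policy that would serve it; attribute access it does not define falls
# through to the default-policy app, so helpers like ``calls`` and ``headers``
# keep working on the wrapper itself.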
class FakeStoragePolicySwift(object):
def __init__(self):
self.storage_policy = defaultdict(FakeSwift)
self._mock_oldest_spi_map = {}
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.storage_policy[None], name)
def __call__(self, env, start_response):
method = env['REQUEST_METHOD']
path = env['PATH_INFO']
_, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
rest_with_last=True)
if not obj:
policy_index = None
else:
policy_index = self._mock_oldest_spi_map.get(cont, 0)
# allow backend policy override
if 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX' in env:
policy_index = int(env['HTTP_X_BACKEND_STORAGE_POLICY_INDEX'])
try:
return self.storage_policy[policy_index].__call__(
env, start_response)
except KeyError:
pass
if method == 'PUT':
resp_class = swob.HTTPCreated
else:
resp_class = swob.HTTPNotFound
self.storage_policy[policy_index].register(
method, path, resp_class, {}, '')
return self.storage_policy[policy_index].__call__(
env, start_response)
class FakeInternalClient(reconciler.InternalClient):
def __init__(self, listings):
self.app = FakeStoragePolicySwift()
self.user_agent = 'fake-internal-client'
self.request_tries = 1
self.parse(listings)
def parse(self, listings):
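# listings maps (storage_policy_index, path) -> timestamp, or -> a
# (timestamp, content_type) tuple, e.g. (as used in the tests below):
#   {(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
#    (1, "/AUTH_bob/c/o1"): 3618.84187}
# A policy index of None registers the entry against the default app, which
# also serves the account and container listing requests.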
self.accounts = defaultdict(lambda: defaultdict(list))
for item, timestamp in listings.items():
# XXX this interface is stupid
if isinstance(timestamp, tuple):
timestamp, content_type = timestamp
else:
timestamp, content_type = timestamp, 'application/x-put'
storage_policy_index, path = item
account, container_name, obj_name = split_path(
path.encode('utf-8'), 0, 3, rest_with_last=True)
self.accounts[account][container_name].append(
(obj_name, storage_policy_index, timestamp, content_type))
for account_name, containers in self.accounts.items():
for con in containers:
self.accounts[account_name][con].sort(key=lambda t: t[0])
for account, containers in self.accounts.items():
account_listing_data = []
account_path = '/v1/%s' % account
for container, objects in containers.items():
container_path = account_path + '/' + container
container_listing_data = []
for entry in objects:
(obj_name, storage_policy_index,
timestamp, content_type) = entry
if storage_policy_index is None and not obj_name:
# empty container
continue
obj_path = container_path + '/' + obj_name
ts = Timestamp(timestamp)
headers = {'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal}
# register object response
self.app.storage_policy[storage_policy_index].register(
'GET', obj_path, swob.HTTPOk, headers)
self.app.storage_policy[storage_policy_index].register(
'DELETE', obj_path, swob.HTTPNoContent, {})
# container listing entry
last_modified = timestamp_to_last_modified(timestamp)
obj_data = {
'bytes': 0,
# listing data is unicode
'name': obj_name.decode('utf-8'),
'last_modified': last_modified,
'hash': timestamp,
'content_type': content_type,
}
container_listing_data.append(obj_data)
container_listing_data.sort(key=operator.itemgetter('name'))
# register container listing response
container_headers = {}
container_qry_string = '?format=json&marker=&end_marker='
self.app.register('GET', container_path + container_qry_string,
swob.HTTPOk, container_headers,
json.dumps(container_listing_data))
if container_listing_data:
obj_name = container_listing_data[-1]['name']
# client should quote and encode marker
end_qry_string = '?format=json&marker=%s&end_marker=' % (
urllib.parse.quote(obj_name.encode('utf-8')))
self.app.register('GET', container_path + end_qry_string,
swob.HTTPOk, container_headers,
json.dumps([]))
self.app.register('DELETE', container_path,
swob.HTTPConflict, {}, '')
# simple account listing entry
container_data = {'name': container}
account_listing_data.append(container_data)
# register account response
account_listing_data.sort(key=operator.itemgetter('name'))
account_headers = {}
account_qry_string = '?format=json&marker=&end_marker='
self.app.register('GET', account_path + account_qry_string,
swob.HTTPOk, account_headers,
json.dumps(account_listing_data))
end_qry_string = '?format=json&marker=%s&end_marker=' % (
urllib.parse.quote(account_listing_data[-1]['name']))
self.app.register('GET', account_path + end_qry_string,
swob.HTTPOk, account_headers,
json.dumps([]))
class TestReconcilerUtils(unittest.TestCase):
def setUp(self):
self.fake_ring = FakeRing()
reconciler.direct_get_container_policy_index.reset()
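# Queue rows name-encode their payload: the listing 'name' is
# "<policy_index>:/<account>/<container>/<object>", 'hash' carries the
# timestamp of the misplaced data (q_ts), 'last_modified' the time the row
# was enqueued (q_record), and the content type selects the operation
# ('application/x-put' -> PUT, 'application/x-delete' -> DELETE).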
def test_parse_raw_obj(self):
got = reconciler.parse_raw_obj({
'name': "2:/AUTH_bob/con/obj",
'hash': Timestamp(2017551.49350).internal,
'last_modified': timestamp_to_last_modified(2017551.49352),
'content_type': 'application/x-delete',
})
self.assertEqual(got['q_policy_index'], 2)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 2017551.49350)
self.assertEqual(got['q_record'], 2017551.49352)
self.assertEqual(got['q_op'], 'DELETE')
got = reconciler.parse_raw_obj({
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
'content_type': 'application/x-put',
})
self.assertEqual(got['q_policy_index'], 1)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 1234.20190)
self.assertEqual(got['q_record'], 1234.20192)
self.assertEqual(got['q_op'], 'PUT')
# negative test
obj_info = {
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
}
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'foo'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'appliation/x-post'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'bogus'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': '-1:/AUTH_test/container'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'asdf:/AUTH_test/c/obj'})
self.assertRaises(KeyError, reconciler.parse_raw_obj,
{'name': '0:/AUTH_test/c/obj',
'content_type': 'application/x-put'})
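# direct_get_container_policy_index HEADs the container on each primary node
# and returns the storage policy index reported by the replica whose
# X-Backend-Status-Changed-At is oldest ("oldest status change wins", as the
# permutation test below asserts). The result also appears to be memoized for
# a short window - see the cache test further down, which only expires after
# time is pushed forward 31 seconds.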
def test_get_container_policy_index(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
for permutation in itertools.permutations((0, 1, 2)):
reconciler.direct_get_container_policy_index.reset()
resp_headers = [stub_resp_headers[i] for i in permutation]
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
test_values = [(info['x-storage-policy-index'],
info['x-backend-status-changed-at']) for
info in resp_headers]
self.assertEqual(oldest_spi, 0,
"oldest policy index wrong "
"for permutation %r" % test_values)
def test_get_container_policy_index_with_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=next(ts),
storage_policy_index=2,
),
container_resp_headers(
status_changed_at=next(ts),
storage_policy_index=1,
),
# old timestamp, but 500 should be ignored...
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(0).internal,
storage_policy_index=0,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_with_socket_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_with_too_many_errors(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, None)
def test_get_container_policy_index_for_deleted(self):
mock_path = 'swift.container.reconciler.direct_head_container'
headers = container_resp_headers(
status_changed_at=Timestamp(time.time()).internal,
storage_policy_index=1,
)
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_for_recently_deleted(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_recreated(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# old put, no recreate
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# recently deleted
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
# recently recreated
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_split_brain(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# oldest put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# old recreate
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
# recently put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_cache(self):
now = time.time()
ts = itertools.count(int(now))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 0)
# re-mock with errors
stub_resp_headers = [
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
with mock.patch('time.time', new=lambda: now):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# still cached
self.assertEqual(oldest_spi, 0)
# propel time forward
the_future = now + 31
with mock.patch('time.time', new=lambda: the_future):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# expired
self.assertEqual(oldest_spi, None)
def test_direct_delete_container_entry(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
x_timestamp = Timestamp(time.time())
headers = {'x-timestamp': x_timestamp.internal}
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o', headers=headers)
self.assertEqual(len(connect_args), 3)
for args in connect_args:
self.assertEqual(args['method'], 'DELETE')
self.assertEqual(args['path'], '/a/c/o')
self.assertEqual(args['headers'].get('x-timestamp'),
headers['x-timestamp'])
def test_direct_delete_container_entry_with_errors(self):
# setup mock direct_delete
mock_path = \
'swift.container.reconciler.direct_delete_container_object'
stub_resp = [
None,
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
'10.0.0.12', 6001, 'sdj', 404, 'Not Found'
),
]
mock_direct_delete = mock.MagicMock()
mock_direct_delete.side_effect = stub_resp
with mock.patch(mock_path, mock_direct_delete), \
mock.patch('eventlet.greenpool.DEBUG', False):
rv = reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o')
self.assertEqual(rv, None)
self.assertEqual(len(mock_direct_delete.mock_calls), 3)
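# add_to_reconciler_queue enqueues work by PUTing an entry object into the
# .misplaced_objects account: the queue container name is the timestamp
# rounded down to the container divisor (3600 in these tests), the object
# name is "<policy>:/<account>/<container>/<object>", and the original
# operation and timestamp travel in the X-Content-Type and
# X-Timestamp/X-Etag headers (force=True stamps the entry with the current
# time and keeps the object timestamp in X-Etag).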
def test_add_to_reconciler_queue(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
connect_args.sort(key=lambda a: (a['ipaddr'], a['port']))
required_headers = ('x-content-type', 'x-etag')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
self.assertEqual(args['headers']['X-Content-Type'],
'application/x-delete')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_force(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
now = time.time()
with mock.patch(mock_path, fake_hc), \
mock.patch('swift.container.reconciler.time.time',
lambda: now):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT',
force=True)
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
connect_args.sort(key=lambda a: (a['ipaddr'], a['port']))
required_headers = ('x-size', 'x-content-type')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'],
Timestamp(now).internal)
self.assertEqual(args['headers']['X-Etag'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_fails(self):
mock_path = 'swift.common.direct_client.http_connect'
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(507)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT')
self.assertFalse(ret)
def test_add_to_reconciler_queue_socket_error(self):
mock_path = 'swift.common.direct_client.http_connect'
exc = socket.error(errno.ECONNREFUSED,
os.strerror(errno.ECONNREFUSED))
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(500, raise_exc=exc)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertFalse(ret)
def listing_qs(marker):
return "?format=json&marker=%s&end_marker=" % \
urllib.parse.quote(marker.encode('utf-8'))
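# The TestReconciler cases below all follow the same shape: seed
# FakeInternalClient with queue listings plus the object's state in each
# policy, pin the "correct" policy per container via _mock_oldest_spi, run
# the reconciler once, and then assert on the sequence of listing GETs, the
# per-policy object calls, and the timestamps used - the write to the
# destination policy carries the newer of the queue/source timestamps at
# offset 2, the cleanup DELETE of the misplaced copy uses offset 1, and a
# successful pass ends by popping the queue entry.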
class TestReconciler(unittest.TestCase):
maxDiff = None
def setUp(self):
self.logger = debug_logger()
conf = {}
with mock.patch('swift.container.reconciler.InternalClient'):
self.reconciler = reconciler.ContainerReconciler(conf)
self.reconciler.logger = self.logger
self.start_interval = int(time.time() // 3600 * 3600)
self.current_container_path = '/v1/.misplaced_objects/%d' % (
self.start_interval) + listing_qs('')
def _mock_listing(self, objects):
self.reconciler.swift = FakeInternalClient(objects)
self.fake_swift = self.reconciler.swift.app
def _mock_oldest_spi(self, container_oldest_spi_map):
self.fake_swift._mock_oldest_spi_map = container_oldest_spi_map
def _run_once(self):
"""
Helper method to run the reconciler once with appropriate direct-client
mocks in place.
Returns the list of direct-deleted container entries in the format
[(acc1, con1, obj1), ...]
"""
def mock_oldest_spi(ring, account, container_name):
return self.fake_swift._mock_oldest_spi_map.get(container_name, 0)
items = {
'direct_get_container_policy_index': mock_oldest_spi,
'direct_delete_container_entry': mock.DEFAULT,
}
mock_time_iter = itertools.count(self.start_interval)
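# each call to the patched time.time() below returns the next integer,
# starting at the top of the current hour, so a single run effectively stays
# within the "current" queue container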
with mock.patch.multiple(reconciler, **items) as mocks:
self.mock_delete_container_entry = \
mocks['direct_delete_container_entry']
with mock.patch('time.time', lambda: next(mock_time_iter)):
self.reconciler.run_once()
return [c[1][1:4] for c in
mocks['direct_delete_container_entry'].mock_calls]
def test_invalid_queue_name(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/bogus"): 3618.84187,
})
deleted_container_entries = self._run_once()
# we try to find something useful
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('bogus'))])
# but only get the bogus record
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# and just leave it on the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertFalse(deleted_container_entries)
def test_invalid_queue_name_marches_onward(self):
# there's something useful there on the queue
self._mock_listing({
(None, "/.misplaced_objects/3600/00000bogus"): 3600.0000,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# we get all the queue entries we can
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and one is garbage
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# but the other is workable
self.assertEqual(self.reconciler.stats['noop_object'], 1)
# so pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_queue_name_with_policy_index_delimiter_in_name(self):
q_path = '.misplaced_objects/3600'
obj_path = "AUTH_bob/c:sneaky/o1:sneaky"
# there's something useful there on the queue
self._mock_listing({
(None, "/%s/1:/%s" % (q_path, obj_path)): 3618.84187,
(1, '/%s' % obj_path): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we find the misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_path))])
# move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path),
('DELETE', '/v1/%s' % obj_path)])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path),
('PUT', '/v1/%s' % obj_path)])
# clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries, [(
'.misplaced_objects', '3600', '1:/%s' % obj_path)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_unable_to_direct_get_oldest_storage_policy(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
})
# the reconciler gets "None" if we can't quorum the container
self._mock_oldest_spi({'c': None})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but can't really say where to go looking
self.assertEqual(self.reconciler.stats['unavailable_container'], 1)
# we don't clean up anything
self.assertEqual(self.reconciler.stats['cleanup_object'], 0)
# and we definitely should not pop_queue
self.assertFalse(deleted_container_entries)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('PUT', '/v1/AUTH_bob/c/o1')])
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2))
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_the_other_direction(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/0:/AUTH_bob/c/o1"): 3618.84187,
(0, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('0:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[1].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '0:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_with_unicode_and_spaces(self):
# the "name" in listings and the unicode string passed to all
# functions where we call them with (account, container, obj)
obj_name = u"AUTH_bob/c \u062a/o1 \u062a"
# anytime we talk about a call made to swift for a path
obj_path = obj_name.encode('utf-8')
# this mock expects unquoted unicode because it handles container
# listings as well as paths
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/%s" % obj_name): 3618.84187,
(1, "/%s" % obj_name): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
# listing_qs encodes and quotes - so give it name
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_name))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
# these calls are to the real path
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path), # 2
('DELETE', '/v1/%s' % obj_path)]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path), # 1
('PUT', '/v1/%s' % obj_path)]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
self.assertEqual(
delete_headers.get('X-Backend-Storage-Policy-Index'), '1')
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
# this mock received the name, it's encoded down in buffered_http
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/%s' % obj_name)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_delete(self):
q_ts = time.time()
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): (
Timestamp(q_ts).internal, 'application/x-delete'),
# object exists in "correct" storage policy - slightly older
(0, "/AUTH_bob/c/o1"): Timestamp(q_ts - 1).internal,
})
self._mock_oldest_spi({'c': 0})
# the tombstone exists in the enqueued storage policy
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# delete it
self.assertEqual(self.reconciler.stats['delete_attempt'], 1)
self.assertEqual(self.reconciler.stats['delete_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
reconcile_headers = self.fake_swift.storage_policy[0].headers[1]
# we DELETE the object in the right place with q_ts + offset 2
self.assertEqual(reconcile_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_enqueued_for_the_correct_dest_noop(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# nothing to see here
self.assertEqual(self.reconciler.stats['noop_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# so we just pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_newer_than_queue_entry(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.234567, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.234567, offset=2))
# src object is cleaned up
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.123456, offset=1))
# and queue is popped
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_older_than_queue_entry(self):
# should be some sort of retry case
q_ts = time.time()
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
(1, '/AUTH_bob/c/o1'): q_ts - 0.00001, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_with_slightly_newer_tombstone(self):
# should be some sort of retry case
q_ts = float(Timestamp(time.time()))
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts, offset=2).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_server_error(self):
# should be some sort of retry case
q_ts = float(Timestamp(time.time()))
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_fails_cleanup(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.123457, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# make the DELETE blow up
self.fake_swift.storage_policy[1].register(
'DELETE', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.123457, offset=2))
# we try to cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.12346, offset=1))
# but cleanup fails!
self.assertEqual(self.reconciler.stats['cleanup_failed'], 1)
# so the queue is not popped
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
# and we'll have to retry
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_src_object_is_forever_gone(self):
# oh boy, hate to be here - this is an oldy
q_ts = self.start_interval - self.reconciler.reclaim_age - 1
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): q_ts,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but it's gone :\
self.assertEqual(self.reconciler.stats['lost_source'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# gah, look, even if it was out there somewhere - we've been at this
# two weeks and haven't found it. We can't just keep looking forever,
# so... we're done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
# dunno if this is helpful, but FWIW we don't throw tombstones?
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
self.assertEqual(self.reconciler.stats['success'], 1) # lol
def test_object_move_dest_already_moved(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# so no attempt to read the source is made, but we do cleanup
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
# and wipe our hands of it
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_newer_than_queue_entry(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019 + 0.00001, # slightly newer
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects...
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# so no attempt to read the source is made, but we do cleanup
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
# and since we cleaned up the old object, this counts as done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_older_than_queue_entry(self):
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.38393,
(1, "/AUTH_bob/c/o1"): 36123.38393,
(0, "/AUTH_bob/c/o1"): 36123.38393 - 0.00001, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and since our version is *newer*, we overwrite
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# ... with a q_ts + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=2))
# then clean the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with a q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=1))
# and pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '36000', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_put_fails(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest fail!
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and try to move it, but it fails
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it failed
self.assertEqual(self.reconciler.stats['copy_success'], 0)
self.assertEqual(self.reconciler.stats['copy_failed'], 1)
# ... so we don't clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['unhandled_errors'], 0)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_put_blows_up_crazy_town(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest blow up crazy town
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', blow_up, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and attempt to move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it blows up hard
self.assertEqual(self.reconciler.stats['unhandled_error'], 1)
# so we don't cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_no_such_object_no_tombstone_recent(self):
q_ts = float(Timestamp(time.time()))
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_jeb/c/o1" % q_path): q_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is recent enough that there could easily be
# tombstones on offline nodes or something, so we'll just leave it
# here and try again later
self.assertEqual(deleted_container_entries, [])
def test_object_move_no_such_object_no_tombstone_ancient(self):
queue_ts = float(Timestamp(time.time())) - \
self.reconciler.reclaim_age * 1.1
container = str(int(queue_ts // 3600 * 3600))
self._mock_listing({
(
None, "/.misplaced_objects/%s/1:/AUTH_jeb/c/o1" % container
): queue_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is old enough that the tombstones, if any, have
# probably been reaped, so we'll just give up
self.assertEqual(
deleted_container_entries,
[('.misplaced_objects', container, '1:/AUTH_jeb/c/o1')])
def test_delete_old_empty_queue_containers(self):
ts = time.time() - self.reconciler.reclaim_age * 1.1
container = str(int(ts // 3600 * 3600))
older_ts = ts - 3600
older_container = str(int(older_ts // 3600 * 3600))
self._mock_listing({
(None, "/.misplaced_objects/%s/" % container): 0,
(None, "/.misplaced_objects/%s/something" % older_container): 0,
})
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('DELETE', '/v1/.misplaced_objects/%s' % container),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('something'))])
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
def test_iter_over_old_containers_in_reverse(self):
step = reconciler.MISPLACED_OBJECTS_CONTAINER_DIVISOR
now = self.start_interval
containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
containers.append(container_name)
# add some old containers too
now -= self.reconciler.reclaim_age
old_containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
old_containers.append(container_name)
containers.sort()
old_containers.sort()
all_containers = old_containers + containers
self._mock_listing(dict((
(None, "/.misplaced_objects/%s/" % container), 0
) for container in all_containers))
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
last_container = all_containers[-1]
account_listing_calls = [
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(last_container)),
]
new_container_calls = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(containers)
        ][1:] # current_container gets skipped the second time around...
old_container_listings = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(old_containers)
]
old_container_deletes = [
('DELETE', '/v1/.misplaced_objects/%s' % container)
for container in reversed(old_containers)
]
old_container_calls = list(itertools.chain(*zip(
old_container_listings, old_container_deletes)))
self.assertEqual(self.fake_swift.calls,
[('GET', self.current_container_path)] +
account_listing_calls + new_container_calls +
old_container_calls)
def test_error_in_iter_containers(self):
self._mock_listing({})
# make the listing return an error
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
swob.HTTPServiceUnavailable, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors, [
'Error listing containers in account '
'.misplaced_objects (Unexpected response: '
'503 Service Unavailable)'])
def test_unhandled_exception_in_reconcile(self):
self._mock_listing({})
# make the listing blow up
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
blow_up, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors,
['Unhandled Exception trying to reconcile: '])
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
# Import PyQt5 classes
from .qt import *
import os
import sys
import numpy as np
import types
from collections import defaultdict, OrderedDict
import operator
try:
import xml.etree.cElementTree as et
except ImportError:
import xml.etree.ElementTree as et
try:
QVariant
except NameError:
QVariant = None
RECALCULATE_ALL = 1
RECALCULATE_VIEW = 2
def types_MethodType(fn, handler):
try:
return types.MethodType(fn, handler, type(handler))
except TypeError:
return types.MethodType(fn, handler)
def _convert_list_type_from_XML(vs):
'''
    Lists are a complex type with the possibility of mixed sub-types. Therefore each
    sub-entity must be wrapped with a type specifier.
'''
vlist = vs.findall('ListItem') + vs.findall('ConfigListItem') # ConfigListItem is legacy
l = []
for xconfig in vlist:
v = xconfig.text
if xconfig.get('type') in CONVERT_TYPE_FROM_XML:
# Recursive; woo!
v = CONVERT_TYPE_FROM_XML[xconfig.get('type')](xconfig)
l.append(v)
return l
def _convert_list_type_to_XML(co, vs):
'''
    Lists are a complex type with the possibility of mixed sub-types. Therefore each
    sub-entity must be wrapped with a type specifier.
'''
for cv in vs:
c = et.SubElement(co, "ListItem")
t = type(cv).__name__
c.set("type", t)
c = CONVERT_TYPE_TO_XML[t](c, cv)
return co
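# A minimal sketch of the list round trip (values purely illustrative): serialising
# the mixed list ['a', 1, True] with _convert_list_type_to_XML produces one
# <ListItem> element per entry, e.g.
#
#     <ListItem type="str">a</ListItem>
#     <ListItem type="int">1</ListItem>
#     <ListItem type="bool">True</ListItem>
#
# and _convert_list_type_from_XML rebuilds the Python values by dispatching on the
# "type" attribute through CONVERT_TYPE_FROM_XML.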
def _convert_dict_type_from_XML(vs):
'''
    Dicts are a complex type with the possibility of mixed sub-types. Therefore each
    sub-entity must be wrapped with a type specifier.
'''
vlist = vs.findall('DictItem')
d = {}
for xconfig in vlist:
v = xconfig.text
if xconfig.get('type') in CONVERT_TYPE_FROM_XML:
# Recursive; woo!
v = CONVERT_TYPE_FROM_XML[xconfig.get('type')](xconfig)
d[xconfig.get('key')] = v
return d
def _convert_dict_type_to_XML(co, vs):
'''
    Dicts are a complex type with the possibility of mixed sub-types. Therefore each
    sub-entity must be wrapped with a type specifier.
'''
for k, v in vs.items():
c = et.SubElement(co, "DictItem")
t = type(v).__name__
c.set("type", t)
c.set("key", k)
c = CONVERT_TYPE_TO_XML[t](c, v)
return co
def _apply_text_str(co, s):
co.text = str(s)
return co
CONVERT_TYPE_TO_XML = {
'str': _apply_text_str,
'unicode': _apply_text_str,
'int': _apply_text_str,
'float': _apply_text_str,
'bool': _apply_text_str,
'list': _convert_list_type_to_XML,
'tuple': _convert_list_type_to_XML,
'dict': _convert_dict_type_to_XML,
'NoneType': _apply_text_str,
}
CONVERT_TYPE_FROM_XML = {
'str': lambda x: str(x.text),
'unicode': lambda x: str(x.text),
'int': lambda x: int(x.text),
'float': lambda x: float(x.text),
'bool': lambda x: bool(x.text.lower() == 'true'),
'list': _convert_list_type_from_XML,
'tuple': _convert_list_type_from_XML,
'dict': _convert_dict_type_from_XML,
'NoneType': lambda x: None,
}
def build_dict_mapper(mdict):
'''
Build a map function pair for forward and reverse mapping from a specified dict
Mapping requires both a forward and reverse (get, set) mapping function. This function
is used to automatically convert a supplied dict to a forward and reverse paired lambda.
:param mdict: A dictionary of display values (keys) and stored values (values)
:type mdict: dict
:rtype: 2-tuple of lambdas that perform forward and reverse map
'''
rdict = {v: k for k, v in mdict.items()}
return (
lambda x: mdict[x] if x in mdict else x,
lambda x: rdict[x] if x in rdict else x,
)
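# A minimal usage sketch for build_dict_mapper (mapping values purely illustrative):
#
#     fwd, rev = build_dict_mapper({'Left align': 'left', 'Right align': 'right'})
#     fwd('Left align')   # -> 'left'  (display value -> stored value)
#     rev('left')         # -> 'Left align'
#     fwd('unmapped')     # -> 'unmapped'  (unknown values pass through unchanged)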
try:
# Python2.7
unicode
except NameError:
# Python3 recoding
def unicode(s):
if isinstance(s, bytes):
return s.decode('utf-8')
else:
return s
# Basestring for typechecking
try:
basestring
except NameError:
basestring = str
def build_tuple_mapper(mlist):
'''
Build a map function pair for forward and reverse mapping from a specified list of tuples
:param mlist: A list of tuples of display values (keys) and stored values (values)
:type mlist: list-of-tuples
:rtype: 2-tuple of lambdas that perform forward and reverse map
'''
    mdict = dict(mlist)
    rdict = {v: k for k, v in mlist}
    return (
        lambda x: mdict[x] if x in mdict else x,
lambda x: rdict[x] if x in rdict else x,
)
# CUSTOM HANDLERS
# QComboBox
def _get_QComboBox(self):
"""
    Get the value of a QComboBox via the re-mapping filter
"""
return self._get_map(self.currentText())
def _set_QComboBox(self, v):
"""
    Set the value of a QComboBox via the re-mapping filter
"""
self.setCurrentIndex(self.findText(unicode(self._set_map(v))))
def _event_QComboBox(self):
"""
Return QCombobox change event signal
"""
return self.currentIndexChanged
# QCheckBox
def _get_QCheckBox(self):
"""
Get state of QCheckbox
"""
return self.isChecked()
def _set_QCheckBox(self, v):
"""
Set state of QCheckbox
"""
self.setChecked(v)
def _event_QCheckBox(self):
"""
Return state change signal for QCheckbox
"""
return self.stateChanged
# QAction
def _get_QAction(self):
"""
Get checked state of QAction
"""
return self.isChecked()
def _set_QAction(self, v):
"""
Set checked state of QAction
"""
self.setChecked(v)
def _event_QAction(self):
"""
Return state change signal for QAction
"""
return self.toggled
# QActionGroup
def _get_QActionGroup(self):
"""
    Get the index of the checked action in the QActionGroup (or None if none is checked)
"""
if self.checkedAction():
return self.actions().index(self.checkedAction())
else:
return None
def _set_QActionGroup(self, v):
"""
    Set the checked action in the QActionGroup by index
"""
self.actions()[v].setChecked(True)
def _event_QActionGroup(self):
"""
    Return the triggered signal for the QActionGroup
"""
return self.triggered
# QPushButton
def _get_QPushButton(self):
"""
Get checked state of QPushButton
"""
return self.isChecked()
def _set_QPushButton(self, v):
"""
Set checked state of QPushButton
"""
self.setChecked(v)
def _event_QPushButton(self):
"""
Return state change signal for QPushButton
"""
return self.toggled
# QSpinBox
def _get_QSpinBox(self):
"""
Get current value for QSpinBox
"""
return self.value()
def _set_QSpinBox(self, v):
"""
Set current value for QSpinBox
"""
self.setValue(v)
def _event_QSpinBox(self):
"""
Return value change signal for QSpinBox
"""
return self.valueChanged
# QDoubleSpinBox
def _get_QDoubleSpinBox(self):
"""
Get current value for QDoubleSpinBox
"""
return self.value()
def _set_QDoubleSpinBox(self, v):
"""
Set current value for QDoubleSpinBox
"""
self.setValue(v)
def _event_QDoubleSpinBox(self):
"""
Return value change signal for QDoubleSpinBox
"""
return self.valueChanged
# QPlainTextEdit
def _get_QPlainTextEdit(self):
"""
Get current document text for QPlainTextEdit
"""
return self.document().toPlainText()
def _set_QPlainTextEdit(self, v):
"""
Set current document text for QPlainTextEdit
"""
self.setPlainText(unicode(v))
def _event_QPlainTextEdit(self):
"""
Return current value changed signal for QPlainTextEdit box.
    Note that this is not a native Qt signal, but a signal fired manually when the
    user presses the "Apply changes" button for the code. Attaching to the
    modified signal would trigger recalculation on every key-press.
"""
return self.sourceChangesApplied
# QLineEdit
def _get_QLineEdit(self):
"""
Get current text for QLineEdit
"""
return self._get_map(self.text())
def _set_QLineEdit(self, v):
"""
Set current text for QLineEdit
"""
self.setText(unicode(self._set_map(v)))
def _event_QLineEdit(self):
"""
Return current value changed signal for QLineEdit box.
"""
return self.textChanged
# CodeEditor
def _get_CodeEditor(self):
"""
Get current document text for CodeEditor. Wraps _get_QPlainTextEdit.
"""
    return _get_QPlainTextEdit(self)
def _set_CodeEditor(self, v):
"""
Set current document text for CodeEditor. Wraps _set_QPlainTextEdit.
"""
_set_QPlainTextEdit(self, unicode(v))
def _event_CodeEditor(self):
"""
Return current value changed signal for CodeEditor box. Wraps _event_QPlainTextEdit.
"""
return _event_QPlainTextEdit(self)
# QListWidget
def _get_QListWidget(self):
"""
Get currently selected values in QListWidget via re-mapping filter.
Selected values are returned as a list.
"""
return [self._get_map(s.text()) for s in self.selectedItems()]
def _set_QListWidget(self, v):
"""
Set currently selected values in QListWidget via re-mapping filter.
Supply values to be selected as a list.
"""
if v:
for s in v:
self.findItems(unicode(self._set_map(s)), Qt.MatchExactly)[0].setSelected(True)
def _event_QListWidget(self):
"""
Return current selection changed signal for QListWidget.
"""
return self.itemSelectionChanged
# QListWidgetWithAddRemoveEvent
def _get_QListWidgetAddRemove(self):
"""
Get current values in QListWidget via re-mapping filter.
    All item values are returned as a list.
"""
return [self._get_map(self.item(n).text()) for n in range(0, self.count())]
def _set_QListWidgetAddRemove(self, v):
"""
    Set the current values in the QListWidget via the re-mapping filter.
    Supply the values as a list; the widget contents are replaced.
"""
block = self.blockSignals(True)
self.clear()
self.addItems([unicode(self._set_map(s)) for s in v])
self.blockSignals(block)
self.itemAddedOrRemoved.emit()
def _event_QListWidgetAddRemove(self):
"""
Return current selection changed signal for QListWidget.
"""
return self.itemAddedOrRemoved
# QColorButton
def _get_QColorButton(self):
"""
Get current value for QColorButton
"""
return self.color()
def _set_QColorButton(self, v):
"""
Set current value for QColorButton
"""
self.setColor(v)
def _event_QColorButton(self):
"""
Return value change signal for QColorButton
"""
return self.colorChanged
# QNoneDoubleSpinBox
def _get_QNoneDoubleSpinBox(self):
"""
    Get current value for QNoneDoubleSpinBox
"""
return self.value()
def _set_QNoneDoubleSpinBox(self, v):
"""
    Set current value for QNoneDoubleSpinBox
"""
self.setValue(v)
def _event_QNoneDoubleSpinBox(self):
"""
    Return value change signal for QNoneDoubleSpinBox
"""
return self.valueChanged
#QCheckTreeWidget
def _get_QCheckTreeWidget(self):
"""
Get currently checked values in QCheckTreeWidget via re-mapping filter.
    Checked values are returned as a list.
"""
return [self._get_map(s) for s in self._checked_item_cache]
def _set_QCheckTreeWidget(self, v):
"""
Set currently checked values in QCheckTreeWidget via re-mapping filter.
Supply values to be selected as a list.
"""
if v:
for s in v:
f = self.findItems(unicode(self._set_map(s)), Qt.MatchExactly | Qt.MatchRecursive)
if f:
f[0].setCheckState(0, Qt.Checked)
def _event_QCheckTreeWidget(self):
"""
Return current checked changed signal for QCheckTreeWidget.
"""
return self.itemCheckedChanged
# QSlider
def _get_QSlider(self):
"""
Get current value for QSlider
"""
return self.value()
def _set_QSlider(self, v):
"""
Set current value for QSlider
"""
self.setValue(v)
def _event_QSlider(self):
"""
Return value change signal for QSlider
"""
return self.valueChanged
#QButtonGroup
def _get_QButtonGroup(self):
"""
Get a list of (index, checked) tuples for the buttons in the group
"""
return [(nr, btn.isChecked()) for nr, btn in enumerate(self.buttons())]
def _set_QButtonGroup(self, v):
"""
Set the states for all buttons in a group from a list of (index, checked) tuples
"""
for idx, state in v:
self.buttons()[idx].setChecked(state)
def _event_QButtonGroup(self):
"""
Return button clicked signal for QButtonGroup
"""
return self.buttonClicked
#QTabWidget
def _get_QTabWidget(self):
"""
    Get the current tab index
"""
return self.currentIndex()
def _set_QTabWidget(self, v):
"""
    Set the current tab index
"""
self.setCurrentIndex(v)
def _event_QTabWidget(self):
"""
Return currentChanged signal for QTabWidget
"""
return self.currentChanged
HOOKS = {
QComboBox: (_get_QComboBox, _set_QComboBox, _event_QComboBox),
QCheckBox: (_get_QCheckBox, _set_QCheckBox, _event_QCheckBox),
QAction: (_get_QAction, _set_QAction, _event_QAction),
QActionGroup: (_get_QActionGroup, _set_QActionGroup, _event_QActionGroup),
QPushButton: (_get_QPushButton, _set_QPushButton, _event_QPushButton),
QSpinBox: (_get_QSpinBox, _set_QSpinBox, _event_QSpinBox),
QDoubleSpinBox: (_get_QDoubleSpinBox, _set_QDoubleSpinBox, _event_QDoubleSpinBox),
QPlainTextEdit: (_get_QPlainTextEdit, _set_QPlainTextEdit, _event_QPlainTextEdit),
QLineEdit: (_get_QLineEdit, _set_QLineEdit, _event_QLineEdit),
QListWidget: (_get_QListWidget, _set_QListWidget, _event_QListWidget),
QSlider: (_get_QSlider, _set_QSlider, _event_QSlider),
QButtonGroup: (_get_QButtonGroup, _set_QButtonGroup, _event_QButtonGroup),
QTabWidget: (_get_QTabWidget, _set_QTabWidget, _event_QTabWidget)
}
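# Each HOOKS entry maps a widget class to a (getter, setter, change-signal) triple.
# ConfigManagerBase.add_handler() binds these onto the widget instance via
# types_MethodType, so e.g. a QSpinBox is read with value(), written with setValue()
# and watched via valueChanged. Additional widget types can be registered at runtime
# with ConfigManagerBase.add_hooks().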
# ConfigManager handles configuration for a given appview
# Supports default values, change signals, export/import from file (for workspace saving)
class ConfigManagerBase(QObject):
# Signals
updated = pyqtSignal(int) # Triggered anytime configuration is changed (refresh)
def __init__(self, defaults=None, *args, **kwargs):
super(ConfigManagerBase, self).__init__(*args, **kwargs)
self.mutex = QMutex()
self.hooks = HOOKS
self.reset()
if defaults is None:
defaults = {}
self.defaults = defaults # Same mapping as above, used when config not set
def _get(self, key):
with QMutexLocker(self.mutex):
try:
return self.config[key]
except:
return None
def _get_default(self, key):
with QMutexLocker(self.mutex):
try:
return self.defaults[key]
except:
return None
# Get config
def get(self, key):
"""
Get config value for a given key from the config manager.
        Returns the value that matches the supplied key. If the value is not set, the
        default value (as set by set_default/set_defaults) is returned.
:param key: The configuration key to return a config value for
:type key: str
:rtype: Any supported (str, int, bool, list-of-supported-types)
"""
v = self._get(key)
if v is not None:
return v
else:
return self._get_default(key)
def set(self, key, value, trigger_handler=True, trigger_update=True):
"""
Set config value for a given key in the config manager.
        Set key to value. The optional trigger_update determines whether event hooks
        will fire for this key (and so trigger re-calculation). It is useful to suppress
        these when updating multiple values at once, for example.
:param key: The configuration key to set
:type key: str
:param value: The value to set the configuration key to
:type value: Any supported (str, int, bool, list-of-supported-types)
:rtype: bool (success)
"""
old = self._get(key)
if old is not None and old == value:
return False # Not updating
# Set value
self._set(key, value)
if trigger_handler and key in self.handlers:
# Trigger handler to update the view
getter = self.handlers[key].getter
setter = self.handlers[key].setter
if setter and getter() != self._get(key):
setter(self._get(key))
# Trigger update notification
if trigger_update:
self.updated.emit(self.eventhooks[key] if key in self.eventhooks else RECALCULATE_ALL)
return True
# Defaults are used in absence of a set value (use for base settings)
def set_default(self, key, value, eventhook=RECALCULATE_ALL):
"""
Set the default value for a given key.
This will be returned if the value is
not set in the current config. It is important to include defaults for all
possible config values for backward compatibility with earlier versions of a plugin.
:param key: The configuration key to set
:type key: str
:param value: The value to set the configuration key to
:type value: Any supported (str, int, bool, list-of-supported-types)
:param eventhook: Attach either a full recalculation trigger (default), or a view-only recalculation trigger to these values.
        :type eventhook: int (RECALCULATE_ALL or RECALCULATE_VIEW)
"""
self.defaults[key] = value
self.eventhooks[key] = eventhook
self.updated.emit(eventhook)
def set_defaults(self, keyvalues, eventhook=RECALCULATE_ALL):
"""
Set the default value for a set of keys.
These will be returned if the value is
not set in the current config. It is important to include defaults for all
possible config values for backward compatibility with earlier versions of a plugin.
:param keyvalues: A dictionary of keys and values to set as defaults
        :type keyvalues: dict
        :param eventhook: Attach either a full recalculation trigger (default), or a view-only recalculation trigger to these values.
        :type eventhook: int (RECALCULATE_ALL or RECALCULATE_VIEW)
"""
for key, value in list(keyvalues.items()):
self.defaults[key] = value
self.eventhooks[key] = eventhook
# Updating the defaults may update the config (if anything without a config value
# is set by it; should check)
self.updated.emit(eventhook)
# Completely replace current config (wipe all other settings)
def replace(self, keyvalues, trigger_update=True):
"""
Completely reset the config with a set of key values.
        Note that this does not wipe handlers or triggers (see reset); it simply replaces the values
in the config entirely. It is the equivalent of unsetting all keys, followed by a
set_many. Anything not in the supplied keyvalues will revert to default.
:param keyvalues: A dictionary of keys and values to set as defaults
:type keyvalues: dict
:param trigger_update: Flag whether to trigger a config update (+recalculation) after all values are set.
:type trigger_update: bool
"""
        self.config = {}
self.set_many(keyvalues)
def set_many(self, keyvalues, trigger_update=True):
"""
Set the value of multiple config settings simultaneously.
This postpones the
triggering of the update signal until all values are set to prevent excess signals.
The trigger_update option can be set to False to prevent any update at all.
:param keyvalues: A dictionary of keys and values to set.
        :type keyvalues: dict
:param trigger_update: Flag whether to trigger a config update (+recalculation) after all values are set.
:type trigger_update: bool
"""
has_updated = False
for k, v in list(keyvalues.items()):
u = self.set(k, v, trigger_update=False)
has_updated = has_updated or u
if has_updated and trigger_update:
self.updated.emit(RECALCULATE_ALL)
return has_updated
# HANDLERS
    # Handlers are UI elements (combo boxes, selects, checkboxes) that automatically
    # update, and are updated from, the config manager. This allows instantaneous
    # updating on config changes and ensures that elements remain in sync.
def add_handler(self, key, handler, mapper=(lambda x: x, lambda x: x),
auto_set_default=True, default=None):
"""
Add a handler (UI element) for a given config key.
The supplied handler should be a QWidget or QAction through which the user
can change the config setting. An automatic getter, setter and change-event
handler is attached which will keep the widget and config in sync. The attached
handler will default to the correct value from the current config.
        An optional mapper may also be provided to handle translation between the values
        shown in the UI and those saved/loaded from the config.
"""
# Add map handler for converting displayed values to internal config data
if isinstance(mapper, (dict, OrderedDict)): # By default allow dict types to be used
mapper = build_dict_mapper(mapper)
elif isinstance(mapper, list) and isinstance(mapper[0], tuple):
mapper = build_tuple_mapper(mapper)
handler._get_map, handler._set_map = mapper
        if key in self.handlers:  # Already there; skip (an existing handler must be removed before it can be replaced)
return
self.handlers[key] = handler
# Look for class in hooks and add getter, setter, updater
cls = self._get_hook(handler)
hookg, hooks, hooku = self.hooks[cls]
handler.getter = types_MethodType(hookg, handler)
handler.setter = types_MethodType(hooks, handler)
handler.updater = types_MethodType(hooku, handler)
logging.debug("Add handler %s for %s" % (type(handler).__name__, key))
handler_callback = lambda x = None: self.set(key, handler.getter(),
trigger_handler=False)
handler.updater().connect(handler_callback)
# Store this so we can issue a specific remove on deletes
self.handler_callbacks[key] = handler_callback
# If the key is not in defaults, set the default to match the handler
if key not in self.defaults:
if default is None:
self.set_default(key, handler.getter())
else:
self.set_default(key, default)
# Keep handler and data consistent
if self._get(key) is not None:
handler.setter(self._get(key))
# If the key is in defaults; set the handler to the default state (but don't add to config)
elif key in self.defaults:
handler.setter(self.defaults[key])
def _get_hook(self, handler):
fst = lambda x: next(x, None)
cls = fst(x for x in self.hooks.keys() if x == type(handler))
if cls is None:
cls = fst(x for x in self.hooks.keys() if isinstance(handler, x))
if cls is None:
raise TypeError("No handler-functions available for this widget "
"type (%s)" % type(handler).__name__)
return cls
def add_handlers(self, keyhandlers):
for key, handler in list(keyhandlers.items()):
self.add_handler(key, handler)
def remove_handler(self, key):
if key in self.handlers:
handler = self.handlers[key]
handler.updater().disconnect(self.handler_callbacks[key])
del self.handlers[key]
def add_hooks(self, key, hooks):
self.hooks[key] = hooks
def getXMLConfig(self, root):
config = et.SubElement(root, "Config")
for ck, cv in list(self.config.items()):
co = et.SubElement(config, "ConfigSetting")
co.set("id", ck)
t = type(cv).__name__
co.set("type", type(cv).__name__)
co = CONVERT_TYPE_TO_XML[t](co, cv)
return root
def setXMLConfig(self, root):
config = {}
for xconfig in root.findall('Config/ConfigSetting'):
#id="experiment_control" type="unicode" value="monocyte at intermediate differentiation stage (GDS2430_2)"/>
if xconfig.get('type') in CONVERT_TYPE_FROM_XML:
v = CONVERT_TYPE_FROM_XML[xconfig.get('type')](xconfig)
config[xconfig.get('id')] = v
self.set_many(config, trigger_update=False)
def as_dict(self):
'''
Return the combination of defaults and config as a flat dict (so it can be pickled)
'''
result_dict = {}
for k, v in self.defaults.items():
result_dict[k] = self.get(k)
return result_dict
class ConfigManager(ConfigManagerBase):
def reset(self):
"""
        Reset the config manager to its initialised state.
This clears all values, unsets all defaults and removes all handlers, maps, and hooks.
"""
self.config = {}
self.handlers = {}
self.handler_callbacks = {}
self.defaults = {}
self.maps = {}
self.eventhooks = {}
def _get(self, key):
with QMutexLocker(self.mutex):
try:
return self.config[key]
except:
return None
def _set(self, key, value):
with QMutexLocker(self.mutex):
self.config[key] = value
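# A minimal usage sketch for ConfigManager (the widget is illustrative and assumes
# a QApplication instance already exists):
#
#     config = ConfigManager()
#     config.set_defaults({'threshold': 5, 'enabled': True})
#     spin = QSpinBox()
#     config.add_handler('threshold', spin)  # widget is initialised to the default (5)
#     spin.setValue(7)                       # edits in the UI flow back into the config
#     config.get('threshold')                # -> 7
#     config.as_dict()                       # -> {'threshold': 7, 'enabled': True}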
class QSettingsManager(ConfigManagerBase):
def reset(self):
"""
        Reset the config manager to its initialised state.
This initialises QSettings, unsets all defaults and removes all handlers, maps, and hooks.
"""
self.settings = QSettings()
self.handlers = {}
self.handler_callbacks = {}
self.defaults = {}
self.maps = {}
self.eventhooks = {}
def _get(self, key):
with QMutexLocker(self.mutex):
v = self.settings.value(key, None)
if v is not None:
if type(v) == QVariant and v.type() == QVariant.Invalid: # Invalid check for Qt4
return None
# Map type to that in defaults: required in case QVariant is a string
# representation of the actual value (e.g. on Windows Reg)
vt = type(v)
if key in self.defaults:
dt = type(self.defaults[key])
if vt == QVariant:
# The target type is a QVariant so munge it
# If QVariant (Qt4):
type_munge = {
int: v.toInt,
float: v.toFloat,
str: v.toString,
unicode: v.toString,
bool: v.toBool,
list: v.toStringList,
}
v = type_munge[dt]()
elif vt != dt and vt == basestring:
# Value is stored as unicode so munge it
type_munge = {
int: lambda x: int(x),
float: lambda x: float(x),
str: lambda x: str(x),
bool: lambda x: x.lower() == u'true',
# other types?
}
v = type_munge[dt](v)
v = dt(v)
return v
else:
return None
def _set(self, key, value):
with QMutexLocker(self.mutex):
self.settings.setValue(key, value)
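# QSettingsManager behaves like ConfigManager but persists values through Qt's
# QSettings backend (the registry on Windows, plists on macOS, INI files elsewhere),
# munging stored string/QVariant values back to the types of the registered defaults
# on read.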
|
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Table."""
from google.cloud._helpers import _to_bytes
from google.cloud.bigtable._generated import (
bigtable_pb2 as data_messages_v2_pb2)
from google.cloud.bigtable._generated import (
bigtable_table_admin_pb2 as table_admin_messages_v2_pb2)
from google.cloud.bigtable._generated import (
table_pb2 as table_v2_pb2)
from google.cloud.bigtable.column_family import _gc_rule_from_pb
from google.cloud.bigtable.column_family import ColumnFamily
from google.cloud.bigtable.row import AppendRow
from google.cloud.bigtable.row import ConditionalRow
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.row_data import PartialRowsData
class Table(object):
"""Representation of a Google Cloud Bigtable Table.
.. note::
We don't define any properties on a table other than the name.
The only other fields are ``column_families`` and ``granularity``,
        the ``column_families`` are not stored locally and
``granularity`` is an enum with only one value.
We can use a :class:`Table` to:
* :meth:`create` the table
* :meth:`delete` the table
* :meth:`list_column_families` in the table
:type table_id: str
:param table_id: The ID of the table.
:type instance: :class:`Instance <.instance.Instance>`
:param instance: The instance that owns the table.
"""
def __init__(self, table_id, instance):
self.table_id = table_id
self._instance = instance
@property
def name(self):
"""Table name used in requests.
.. note::
This property will not change if ``table_id`` does not, but the
return value is not cached.
The table name is of the form
``"projects/../instances/../tables/{table_id}"``
:rtype: str
:returns: The table name.
"""
return self._instance.name + '/tables/' + self.table_id
def column_family(self, column_family_id, gc_rule=None):
"""Factory to create a column family associated with this table.
:type column_family_id: str
:param column_family_id: The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type gc_rule: :class:`.GarbageCollectionRule`
:param gc_rule: (Optional) The garbage collection settings for this
column family.
:rtype: :class:`.ColumnFamily`
:returns: A column family owned by this table.
"""
return ColumnFamily(column_family_id, self, gc_rule=gc_rule)
def row(self, row_key, filter_=None, append=False):
"""Factory to create a row associated with this table.
.. warning::
At most one of ``filter_`` and ``append`` can be used in a
:class:`Row`.
:type row_key: bytes
:param row_key: The key for the row being created.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) Filter to be used for conditional mutations.
See :class:`.DirectRow` for more details.
:type append: bool
:param append: (Optional) Flag to determine if the row should be used
for append mutations.
:rtype: :class:`.DirectRow`
:returns: A row owned by this table.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``filter_`` and ``append`` are used.
"""
if append and filter_ is not None:
raise ValueError('At most one of filter_ and append can be set')
if append:
return AppendRow(row_key, self)
elif filter_ is not None:
return ConditionalRow(row_key, self, filter_=filter_)
else:
return DirectRow(row_key, self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.table_id == self.table_id and
other._instance == self._instance)
def __ne__(self, other):
return not self.__eq__(other)
def create(self, initial_split_keys=None, column_families=()):
"""Creates this table.
.. note::
A create request returns a
:class:`._generated.table_pb2.Table` but we don't use
this response.
:type initial_split_keys: list
:param initial_split_keys: (Optional) List of row keys that will be
used to initially split the table into
several tablets (Tablets are similar to
HBase regions). Given two split keys,
``"s1"`` and ``"s2"``, three tablets will be
created, spanning the key ranges:
``[, s1)``, ``[s1, s2)``, ``[s2, )``.
:type column_families: list
:param column_families: (Optional) List or other iterable of
:class:`.ColumnFamily` instances.
"""
if initial_split_keys is not None:
split_pb = table_admin_messages_v2_pb2.CreateTableRequest.Split
initial_split_keys = [
split_pb(key=key) for key in initial_split_keys]
table_pb = None
if column_families:
table_pb = table_v2_pb2.Table()
for col_fam in column_families:
curr_id = col_fam.column_family_id
table_pb.column_families[curr_id].MergeFrom(col_fam.to_pb())
request_pb = table_admin_messages_v2_pb2.CreateTableRequest(
initial_splits=initial_split_keys or [],
parent=self._instance.name,
table_id=self.table_id,
table=table_pb,
)
client = self._instance._client
# We expect a `._generated.table_pb2.Table`
client._table_stub.CreateTable(request_pb)
def delete(self):
"""Delete this table."""
request_pb = table_admin_messages_v2_pb2.DeleteTableRequest(
name=self.name)
client = self._instance._client
# We expect a `google.protobuf.empty_pb2.Empty`
client._table_stub.DeleteTable(request_pb)
def list_column_families(self):
"""List the column families owned by this table.
:rtype: dict
:returns: Dictionary of column families attached to this table. Keys
are strings (column family names) and values are
:class:`.ColumnFamily` instances.
:raises: :class:`ValueError <exceptions.ValueError>` if the column
family name from the response does not agree with the computed
name from the column family ID.
"""
request_pb = table_admin_messages_v2_pb2.GetTableRequest(
name=self.name)
client = self._instance._client
# We expect a `._generated.table_pb2.Table`
table_pb = client._table_stub.GetTable(request_pb)
result = {}
for column_family_id, value_pb in table_pb.column_families.items():
gc_rule = _gc_rule_from_pb(value_pb.gc_rule)
column_family = self.column_family(column_family_id,
gc_rule=gc_rule)
result[column_family_id] = column_family
return result
def read_row(self, row_key, filter_=None):
"""Read a single row from this table.
:type row_key: bytes
:param row_key: The key of the row to read from.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
row. If unset, returns the entire row.
:rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>`
:returns: The contents of the row if any chunks were returned in
the response, otherwise :data:`None`.
:raises: :class:`ValueError <exceptions.ValueError>` if a commit row
chunk is never encountered.
"""
request_pb = _create_row_request(self.name, row_key=row_key,
filter_=filter_)
client = self._instance._client
response_iterator = client._data_stub.ReadRows(request_pb)
rows_data = PartialRowsData(response_iterator)
rows_data.consume_all()
if rows_data.state not in (rows_data.NEW_ROW, rows_data.START):
raise ValueError('The row remains partial / is not committed.')
if len(rows_data.rows) == 0:
return None
return rows_data.rows[row_key]
def read_rows(self, start_key=None, end_key=None, limit=None,
filter_=None):
"""Read rows from this table.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads every column in
each row.
:rtype: :class:`.PartialRowsData`
:returns: A :class:`.PartialRowsData` convenience wrapper for consuming
the streamed results.
"""
request_pb = _create_row_request(
self.name, start_key=start_key, end_key=end_key, filter_=filter_,
limit=limit)
client = self._instance._client
response_iterator = client._data_stub.ReadRows(request_pb)
# We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse`
return PartialRowsData(response_iterator)
def sample_row_keys(self):
"""Read a sample of row keys in the table.
The returned row keys will delimit contiguous sections of the table of
approximately equal size, which can be used to break up the data for
distributed tasks like mapreduces.
The elements in the iterator are a SampleRowKeys response and they have
the properties ``offset_bytes`` and ``row_key``. They occur in sorted
order. The table might have contents before the first row key in the
list and after the last one, but a key containing the empty string
indicates "end of table" and will be the last response given, if
present.
.. note::
Row keys in this list may not have ever been written to or read
from, and users should therefore not make any assumptions about the
row key structure that are specific to their use case.
The ``offset_bytes`` field on a response indicates the approximate
total storage space used by all rows in the table which precede
``row_key``. Buffering the contents of all rows between two subsequent
samples would require space roughly equal to the difference in their
``offset_bytes`` fields.
:rtype: :class:`~google.cloud.exceptions.GrpcRendezvous`
:returns: A cancel-able iterator. Can be consumed by calling ``next()``
or by casting to a :class:`list` and can be cancelled by
calling ``cancel()``.
"""
request_pb = data_messages_v2_pb2.SampleRowKeysRequest(
table_name=self.name)
client = self._instance._client
response_iterator = client._data_stub.SampleRowKeys(request_pb)
return response_iterator
def _create_row_request(table_name, row_key=None, start_key=None, end_key=None,
filter_=None, limit=None):
"""Creates a request to read rows in a table.
:type table_name: str
:param table_name: The name of the table to read from.
:type row_key: bytes
:param row_key: (Optional) The key of a specific row to read from.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads the entire table.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results.
:rtype: :class:`data_messages_v2_pb2.ReadRowsRequest`
:returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``row_key`` and one of ``start_key`` and ``end_key`` are set
"""
request_kwargs = {'table_name': table_name}
if (row_key is not None and
(start_key is not None or end_key is not None)):
raise ValueError('Row key and row range cannot be '
'set simultaneously')
range_kwargs = {}
if start_key is not None or end_key is not None:
if start_key is not None:
range_kwargs['start_key_closed'] = _to_bytes(start_key)
if end_key is not None:
range_kwargs['end_key_open'] = _to_bytes(end_key)
if filter_ is not None:
request_kwargs['filter'] = filter_.to_pb()
if limit is not None:
request_kwargs['rows_limit'] = limit
message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs)
if row_key is not None:
message.rows.row_keys.append(_to_bytes(row_key))
if range_kwargs:
message.rows.row_ranges.add(**range_kwargs)
return message
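# A minimal usage sketch (client and instance construction elided; names are
# illustrative):
#
#     table = Table('my-table', instance)
#     table.create(column_families=[table.column_family('cf1')])
#     row = table.row(b'row-key-1')                # DirectRow
#     row.set_cell('cf1', b'qualifier', b'value')
#     row.commit()
#     partial_row = table.read_row(b'row-key-1')   # PartialRowData or None
#     rows = table.read_rows(start_key=b'a', end_key=b'z', limit=10)
#     rows.consume_all()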
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Ipv6Io.Nodes.Node.Statistics.Traffic.Ipv6' : {
'meta_info' : _MetaInfoClass('Ipv6Io.Nodes.Node.Statistics.Traffic.Ipv6',
False,
[
_MetaInfoClassMember('bad-header-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Bad Header Packets
''',
'bad_header_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('bad-source-address-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Bad Source Address Packets
''',
'bad_source_address_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('format-errors', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Format Errors
''',
'format_errors',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('forwarded-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Packets Forwarded
''',
'forwarded_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('fragment-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Fragmented Packet Count
''',
'fragment_count',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('fragment-failures', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Fragment Failures
''',
'fragment_failures',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('fragmented-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Packets Fragmented
''',
'fragmented_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('fragments', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Fragments
''',
'fragments',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('generated-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Packets Output
''',
'generated_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('hop-count-exceeded-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Hop Count Exceeded Packets
''',
'hop_count_exceeded_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('lisp-decap-errors', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Lisp Decap errors
''',
'lisp_decap_errors',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('lisp-encap-errors', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Lisp Encap errors
''',
'lisp_encap_errors',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('lisp-v4-decap-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Lisp IPv4 Decapped packets
''',
'lisp_v4_decap_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('lisp-v4-encap-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Lisp IPv4 Encapped packets
''',
'lisp_v4_encap_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('lisp-v6-decap-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Lisp IPv6 Decapped packets
''',
'lisp_v6_decap_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('lisp-v6-encap-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Lisp IPv6 Encapped packets
''',
'lisp_v6_encap_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('local-destination-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Local Destination Packets
''',
'local_destination_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('miscellaneous-drops', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Misc. drops
''',
'miscellaneous_drops',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('no-route-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' No Route Packets
''',
'no_route_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('reassembled-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Reassembled Packets
''',
'reassembled_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('reassembly-failures', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Reassembly Failures
''',
'reassembly_failures',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('reassembly-maximum-drops', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Reassembly Reach Maximum Drop
''',
'reassembly_maximum_drops',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('reassembly-timeouts', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Reassembly Timeouts
''',
'reassembly_timeouts',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-multicast-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Multicast In
''',
'received_multicast_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-multicast-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Multicast Out
''',
'sent_multicast_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('source-routed-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Packets Source Routed
''',
'source_routed_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('too-big-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Packet Too Big
''',
'too_big_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('total-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Packets
''',
'total_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('truncated-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Truncated Packets
''',
'truncated_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('unknown-option-type-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Unknown Option Type Packets
''',
'unknown_option_type_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('unknown-protocol-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Unknown Protocol Packets
''',
'unknown_protocol_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
],
'Cisco-IOS-XR-ipv6-io-oper',
'ipv6',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-io-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper'
),
},
'Ipv6Io.Nodes.Node.Statistics.Traffic.Icmp' : {
'meta_info' : _MetaInfoClass('Ipv6Io.Nodes.Node.Statistics.Traffic.Icmp',
False,
[
_MetaInfoClassMember('checksum-error-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Checksum Errors
''',
'checksum_error_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('output-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Transmitted
''',
'output_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-echo-reply-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Echo Reply Received
''',
'received_echo_reply_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-echo-request-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Echo Request Received
''',
'received_echo_request_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-hop-count-expired-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Hop Count Expired Received
''',
'received_hop_count_expired_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-parameter-error-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Parameter Error Messages Received
''',
'received_parameter_error_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-parameter-header-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Parameter Next Header Messages Received
''',
'received_parameter_header_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-parameter-option-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Parameter Option Problem Received
''',
'received_parameter_option_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-parameter-unknown-type-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Parameter Unknown Type Messages Received
''',
'received_parameter_unknown_type_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-reassembly-timeouts', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Reassembly Timeouts
''',
'received_reassembly_timeouts',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-too-big-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Too Big Messages Received
''',
'received_too_big_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-unknown-timeout-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Unknown Timeout Messages Received
''',
'received_unknown_timeout_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-unreachable-address-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Addr Unreachable Received
''',
'received_unreachable_address_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-unreachable-admin-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Admin Unreachable Received
''',
'received_unreachable_admin_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-unreachable-neighbor-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Host Unreachable Received
''',
'received_unreachable_neighbor_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-unreachable-port-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Port Unreachable Received
''',
'received_unreachable_port_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-unreachable-routing-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Route Unreachable Received
''',
'received_unreachable_routing_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-unreachable-unknown-type-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Unreachable Unknown Messages Received
''',
'received_unreachable_unknown_type_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-echo-reply-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Echo Reply Sent
''',
'sent_echo_reply_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-echo-request-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Echo Request Sent
''',
'sent_echo_request_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-hop-count-expired-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Hop Count Expired Sent
''',
'sent_hop_count_expired_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-parameter-error-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Parameter Error Messages Sent
''',
'sent_parameter_error_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-parameter-header-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Parameter Next Header Messages Sent
''',
'sent_parameter_header_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-parameter-option-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Parameter Option Messages Sent
''',
'sent_parameter_option_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-parameter-unknown-type-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Parameter Unknown Type Messages Sent
''',
'sent_parameter_unknown_type_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-rate-limited-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Sent Packets Ratelimited
''',
'sent_rate_limited_packets',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-reassembly-timeouts', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Reassembly Timeouts
''',
'sent_reassembly_timeouts',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-too-big-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Too Big Messages Sent
''',
'sent_too_big_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-unknown-timeout-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Unknown Timeout Messages Sent
''',
'sent_unknown_timeout_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-unreachable-address-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Addr Unreachable Sent
''',
'sent_unreachable_address_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-unreachable-admin-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Admin Unreachable Sent
''',
'sent_unreachable_admin_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-unreachable-neighbor-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Host Unreachable Sent
''',
'sent_unreachable_neighbor_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-unreachable-port-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Port Unreachable Sent
''',
'sent_unreachable_port_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-unreachable-routing-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Route Unreachable Sent
''',
'sent_unreachable_routing_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-unreachable-unknown-type-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Unreachable Unknown Messages Sent
''',
'sent_unreachable_unknown_type_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('too-short-error-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Too Short Errors
''',
'too_short_error_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('total-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Received
''',
'total_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('unknown-error-type-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Unknown Error
''',
'unknown_error_type_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
],
'Cisco-IOS-XR-ipv6-io-oper',
'icmp',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-io-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper'
),
},
'Ipv6Io.Nodes.Node.Statistics.Traffic.Ipv6NodeDiscovery' : {
'meta_info' : _MetaInfoClass('Ipv6Io.Nodes.Node.Statistics.Traffic.Ipv6NodeDiscovery',
False,
[
_MetaInfoClassMember('received-neighbor-advertisement-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Neighbor Advertisements Received
''',
'received_neighbor_advertisement_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-neighbor-solicitation-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Neighbor Solicitations Received
''',
'received_neighbor_solicitation_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-redirect-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Redirect Received
''',
'received_redirect_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-router-advertisement-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Router Advertisements Received
''',
'received_router_advertisement_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('received-router-solicitation-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Router Solicitations Received
''',
'received_router_solicitation_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-neighbor-advertisement-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Neighbor Advertisements Sent
''',
'sent_neighbor_advertisement_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-neighbor-solicitation-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Neighbor Solicitations Sent
''',
'sent_neighbor_solicitation_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-redirect-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Redirect Sent
''',
'sent_redirect_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-router-advertisement-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Router Advertisements Sent
''',
'sent_router_advertisement_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('sent-router-solicitation-messages', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ICMP Router Solicitations Sent
''',
'sent_router_solicitation_messages',
'Cisco-IOS-XR-ipv6-io-oper', False),
],
'Cisco-IOS-XR-ipv6-io-oper',
'ipv6-node-discovery',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-io-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper'
),
},
'Ipv6Io.Nodes.Node.Statistics.Traffic' : {
'meta_info' : _MetaInfoClass('Ipv6Io.Nodes.Node.Statistics.Traffic',
False,
[
_MetaInfoClassMember('icmp', REFERENCE_CLASS, 'Icmp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper', 'Ipv6Io.Nodes.Node.Statistics.Traffic.Icmp',
[], [],
''' ICMP Statistics
''',
'icmp',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('ipv6', REFERENCE_CLASS, 'Ipv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper', 'Ipv6Io.Nodes.Node.Statistics.Traffic.Ipv6',
[], [],
''' IPv6 Statistics
''',
'ipv6',
'Cisco-IOS-XR-ipv6-io-oper', False),
_MetaInfoClassMember('ipv6-node-discovery', REFERENCE_CLASS, 'Ipv6NodeDiscovery' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper', 'Ipv6Io.Nodes.Node.Statistics.Traffic.Ipv6NodeDiscovery',
[], [],
''' IPv6 Node Discovery Statistics
''',
'ipv6_node_discovery',
'Cisco-IOS-XR-ipv6-io-oper', False),
],
'Cisco-IOS-XR-ipv6-io-oper',
'traffic',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-io-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper'
),
},
'Ipv6Io.Nodes.Node.Statistics' : {
'meta_info' : _MetaInfoClass('Ipv6Io.Nodes.Node.Statistics',
False,
[
_MetaInfoClassMember('traffic', REFERENCE_CLASS, 'Traffic' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper', 'Ipv6Io.Nodes.Node.Statistics.Traffic',
[], [],
''' Traffic statistics for a node
''',
'traffic',
'Cisco-IOS-XR-ipv6-io-oper', False),
],
'Cisco-IOS-XR-ipv6-io-oper',
'statistics',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-io-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper'
),
},
'Ipv6Io.Nodes.Node' : {
'meta_info' : _MetaInfoClass('Ipv6Io.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Node name
''',
'node_name',
'Cisco-IOS-XR-ipv6-io-oper', True),
_MetaInfoClassMember('statistics', REFERENCE_CLASS, 'Statistics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper', 'Ipv6Io.Nodes.Node.Statistics',
[], [],
''' Statistical IPv6 network operational data for
a node
''',
'statistics',
'Cisco-IOS-XR-ipv6-io-oper', False),
],
'Cisco-IOS-XR-ipv6-io-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-io-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper'
),
},
'Ipv6Io.Nodes' : {
'meta_info' : _MetaInfoClass('Ipv6Io.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper', 'Ipv6Io.Nodes.Node',
[], [],
''' IPv6 network operational data for a particular
node
''',
'node',
'Cisco-IOS-XR-ipv6-io-oper', False),
],
'Cisco-IOS-XR-ipv6-io-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-io-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper'
),
},
'Ipv6Io' : {
'meta_info' : _MetaInfoClass('Ipv6Io',
False,
[
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper', 'Ipv6Io.Nodes',
[], [],
''' Node-specific IPv6 IO operational data
''',
'nodes',
'Cisco-IOS-XR-ipv6-io-oper', False),
],
'Cisco-IOS-XR-ipv6-io-oper',
'ipv6-io',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-io-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_io_oper'
),
},
}
_meta_table['Ipv6Io.Nodes.Node.Statistics.Traffic.Ipv6']['meta_info'].parent = _meta_table['Ipv6Io.Nodes.Node.Statistics.Traffic']['meta_info']
_meta_table['Ipv6Io.Nodes.Node.Statistics.Traffic.Icmp']['meta_info'].parent = _meta_table['Ipv6Io.Nodes.Node.Statistics.Traffic']['meta_info']
_meta_table['Ipv6Io.Nodes.Node.Statistics.Traffic.Ipv6NodeDiscovery']['meta_info'].parent = _meta_table['Ipv6Io.Nodes.Node.Statistics.Traffic']['meta_info']
_meta_table['Ipv6Io.Nodes.Node.Statistics.Traffic']['meta_info'].parent = _meta_table['Ipv6Io.Nodes.Node.Statistics']['meta_info']
_meta_table['Ipv6Io.Nodes.Node.Statistics']['meta_info'].parent = _meta_table['Ipv6Io.Nodes.Node']['meta_info']
_meta_table['Ipv6Io.Nodes.Node']['meta_info'].parent = _meta_table['Ipv6Io.Nodes']['meta_info']
_meta_table['Ipv6Io.Nodes']['meta_info'].parent = _meta_table['Ipv6Io']['meta_info']
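# --- Illustrative sketch (not part of the generated bindings) ----------------
# The ``parent`` assignments above link each _MetaInfoClass to its container,
# so the containment hierarchy can be recovered by walking the chain.  The
# ``name`` attribute used below is an assumption (it mirrors the first
# constructor argument); only ``parent`` is guaranteed by the code above.
#
#   def _class_chain(meta):
#       parts = []
#       while meta is not None:
#           parts.append(getattr(meta, 'name', '?'))
#           meta = getattr(meta, 'parent', None)
#       return ' -> '.join(reversed(parts))
#
#   # _class_chain(_meta_table['Ipv6Io.Nodes.Node']['meta_info'])
#   # -> 'Ipv6Io -> Ipv6Io.Nodes -> Ipv6Io.Nodes.Node'
# ------------------------------------------------------------------------------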
|
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 20, 2017
@author: alfoa
"""
import os
from glob import glob
import inspect
import xml.etree.ElementTree as ET
import copy
from collections import OrderedDict
class testDescription(object):
"""
Class that handles the checks on the description of the tests
"""
def __init__(self):
"""
Constructor
"""
    self.__undescribedFiles, self.__describedFiles = self.noDescriptionTestsAndInformationOnTheOther()
self.__totTestFiles = len(self.__undescribedFiles) + len(self.__describedFiles.keys())
self.__allDescribed = len(self.__undescribedFiles) == 0
self.__ravenDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
self.__userPath = os.path.abspath(os.path.join(self.__ravenDir,".."))
def areAllTestDescribed(self):
"""
Method to check if all the tests are described
@ In, None
@ Out, __allDescribed, bool, all described?
"""
return self.__allDescribed
def getFoldersOfUndocumentedTests(self):
"""
Method to get all the folders of tests that contain
undocumented tests
@ In, None
@ Out, undocumentedFolders, list, list containing folders with undocumented tests
"""
undocumentedFolders = []
for testName in self.__undescribedFiles:
dirName = os.path.dirname(testName)
if dirName not in undocumentedFolders: undocumentedFolders.append(dirName)
return undocumentedFolders
def getTotalNumberOfTests(self):
"""
Method to get the number of tests
@ In, None
@ Out, __totTestFiles, int, number of tests
"""
return self.__totTestFiles
def getDescriptionCoverage(self):
"""
Method to get the description coverage in %
@ In, None
@ Out, getDescriptionCoverage, float, percent of description coverage
"""
if self.areAllTestDescribed(): return 100.
else : return (float(len(self.__describedFiles.keys()))/float(self.__totTestFiles))*100
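  # Worked example of the formula above (illustrative): with 8 described files
  # and 2 undescribed ones, __totTestFiles is 10 and the method returns
  # (8 / 10) * 100 = 80.0 (percent).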
def getUndescribedFileNames(self):
"""
Method to get the list of un-described files
@ In, None
@ Out, __undescribedFiles, list, list of un-described files
"""
return self.__undescribedFiles
def noDescriptionTestsAndInformationOnTheOther(self):
"""
      This method returns a list of the framework test file names (i.e. the ones
      with an XML or Python extension and listed in the "tests" files) that do not
      report any description, together with a dictionary collecting the TestInfo
      node of each described test
@ In, None
@ Out, outputTuple, tuple, tuple (list(file names without a description),
dictionary({'fileName':'xmlNode with the description'}))
"""
__testInfoList = []
__testList = []
filesWithDescription = OrderedDict()
noDescriptionFiles = []
startDir = os.path.join(os.path.dirname(__file__),'../')
for dirr,_,_ in os.walk(startDir):
__testInfoList.extend(glob(os.path.join(dirr,"tests")))
for testInfoFile in __testInfoList:
if 'moose' in testInfoFile.split(os.sep) or not os.path.isfile(testInfoFile):
continue
fileObject = open(testInfoFile,"r+")
fileLines = fileObject.readlines()
dirName = os.path.dirname(testInfoFile)
# I do not want to use getpot!
for line in fileLines:
if line.strip().startswith("input"):
fileName = line.split("=")[-1].replace("'", "").replace('"', '').rstrip().strip()
fileName = os.path.join(dirName,fileName)
if os.path.split(fileName)[-1].lower().endswith('xml'):
__testList.append(os.path.abspath(fileName))
if os.path.split(fileName)[-1].lower().endswith('py'):
__testList.append(os.path.abspath(fileName))
fileObject.close()
for testFile in __testList:
if testFile.endswith('xml'):
        try: root = ET.parse(testFile).getroot()
        except Exception as e:
          print('file :'+testFile+'\nXML Parsing error!',e,'\n')
          continue
        if root.tag != 'Simulation': print('\nThe root node is not Simulation for file '+testFile+'\n')
testInfoNode = root.find("TestInfo")
if testInfoNode is None and root.tag == 'Simulation': noDescriptionFiles.append(testFile)
else: filesWithDescription[testFile] = copy.deepcopy(testInfoNode)
else:
fileLines = open(testFile,"r+").readlines()
xmlPortion = []
startReading = False
for line in fileLines:
if startReading:
xmlPortion.append(line)
if '<TestInfo' in line:
startReading = True
xmlPortion.append("<TestInfo>")
if '</TestInfo' in line:
startReading = False
if len(xmlPortion) >0:
try: testInfoNode = ET.fromstringlist(xmlPortion)
except ET.ParseError as e: print('file :'+testFile+'\nXML Parsing error!',e,'\n')
else : testInfoNode = None
if testInfoNode is None: noDescriptionFiles.append(testFile)
else: filesWithDescription[testFile] = copy.deepcopy(testInfoNode)
outputTuple = noDescriptionFiles, filesWithDescription
return outputTuple
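  # Illustrative example (an assumption about layout, not taken from an actual
  # test): the scanner above only looks for the literal '<TestInfo' and
  # '</TestInfo' markers, so a Python test can carry its description inside a
  # docstring or comment block, e.g.
  #
  #   """
  #     <TestInfo>
  #       <name>framework/path/to/test/label</name>
  #       <author>AuthorGitLabTag</author>
  #       <created>YYYY-MM-DD</created>
  #       <classesTested>Module.Class</classesTested>
  #       <description>what this test checks</description>
  #     </TestInfo>
  #   """
  #
  # The lines between the markers are then re-parsed with ET.fromstringlist.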
def _fromXmlToLatexDocument(self,xmlNode, fileName):
"""
      Template method to construct the LaTeX documentation from a <TestInfo> xml block
@ In, xmlNode, xml.etree.ElementTree, xml node containing the information
@ In, fileName, string, file name of the test
@ Out, output, tuple, tuple(latexString = string representing the latex documentation for this test,
                                 chapterName = the name to be given to the chapter)
"""
descriptionNode = xmlNode.find("description")
authorNode = xmlNode.find("author")
nameNode = xmlNode.find("name")
createdDateNode = xmlNode.find("created")
classTestedNode = xmlNode.find("classesTested")
requirementsNode = xmlNode.find("requirements")
analyticNode = xmlNode.find("analytic")
revisionsNode = xmlNode.find("revisions")
# check
if descriptionNode is not None: description = descriptionNode.text
else : raise IOError("XML node <description> not found for test "+ fileName)
if authorNode is not None : author = authorNode.text
else : raise IOError("XML node <author> not found for test "+ fileName)
if nameNode is not None : name = nameNode.text
else : raise IOError("XML node <name> not found for test "+ fileName)
if createdDateNode is not None: createdDate = createdDateNode.text
else : raise IOError("XML node <created> not found for test "+ fileName)
if classTestedNode is not None: classTested = classTestedNode.text
else : raise IOError("XML node <classesTested> not found for test "+ fileName)
nameChapter = name.replace("/", " ").replace("_", " ").upper()
fileLocation = '.'+fileName.replace(self.__userPath,"")
latexString = "This test can be found at ``\path{"+fileLocation+"}''.\n"
latexString += " This test can be called executing the following command:"
latexString += " \\begin{lstlisting}[language=bash]\n"
latexString += " ./run_tests --re="+name+"\n"
latexString += " \\end{lstlisting}"
latexString += " or \n"
latexString += " \\begin{lstlisting}[language=bash]\n"
latexString += " ./run_framework_tests --re="+name+"\n"
latexString += " \\end{lstlisting}"
latexString += ' \\begin{itemize} \n'
# Test description
latexString += ' \\item Test Description:\n'
latexString += ' \\begin{itemize} \n'
latexString += ' \\item ' +description.strip().replace("_", "\\_").replace("\\\\_", "\\_").replace("#","\#")+'\n'
latexString += ' \\end{itemize} \n'
# is analytical?
if analyticNode is not None:
analyticalDescription = analyticNode.text.replace("_", "\_")
latexString += ' \\item This test is analytic:\n'
latexString += ' \\begin{itemize} \n'
latexString += ' \\item ' +str(analyticalDescription).strip().replace("#","\#")+'\n'
latexString += ' \\end{itemize} \n'
# author
latexString += ' \\item Original Author:\n'
latexString += ' \\begin{itemize} \n'
latexString += ' \\item ' +str(author).strip()+'\n'
latexString += ' \\end{itemize} \n'
# createdDate
latexString += ' \\item Creation date:\n'
latexString += ' \\begin{itemize} \n'
latexString += ' \\item ' +str(createdDate).strip()+'\n'
latexString += ' \\end{itemize} \n'
# classTested
latexString += ' \\item The classes tested in this test are:\n'
latexString += ' \\begin{itemize} \n'
latexString += ' \\item ' +str(classTested).strip()+'\n'
latexString += ' \\end{itemize} \n'
# is requirement?
if requirementsNode is not None:
requirementDescription = requirementsNode.text.split() if "," not in requirementsNode.text else requirementsNode.text.split(",")
latexString += ' \\item This test fulfills the following requirement:\n'
latexString += ' \\begin{itemize} \n'
for req in requirementDescription:
latexString += ' \\item ' +req.strip().replace("#","\#")+'\n'
latexString += ' \\end{itemize} \n'
if revisionsNode is not None and len(revisionsNode) > 0:
latexString += ' \\item Since the creation of this test, the following main revisions have been performed:\n'
latexString += ' \\begin{enumerate} \n'
for child in revisionsNode:
revisionText = str(child.text).strip().replace("_", "\_").replace("#","\#")
revisionAuthor = child.attrib.get('author',"None").strip()
revisionDate = child.attrib.get('date',"None").strip()
latexString += ' \\item revision info:\n'
latexString += ' \\begin{itemize} \n'
latexString += ' \\item author : ' +revisionAuthor+'\n'
latexString += ' \\item date : ' +revisionDate+'\n'
latexString += ' \\item description: ' +revisionText+'\n'
latexString += ' \\end{itemize} \n'
latexString += ' \\end{enumerate} \n'
latexString += ' \\end{itemize} \n'
output = latexString, nameChapter
return output
def splitTestDescription(self):
"""
      This method creates 3 dictionaries of test information:
1) verification tests
2) analytical tests
3) requirement tests
@ In, None
@ Out, tupleOut, tuple, tuple of the 3 dictionaries ( tuple(verificationDict,analyticalDict,requirementDict) )
"""
verificationDict = OrderedDict()
requirementDict = OrderedDict()
analyticalDict = OrderedDict()
for testFileName, xmlNode in self.__describedFiles.items():
if xmlNode is not None:
if xmlNode.find("requirements") is not None:
# requirement
requirementDict[testFileName] = xmlNode
if xmlNode.find("analytic") is not None:
# analytic
analyticalDict[testFileName] = xmlNode
if xmlNode.find("analytic") is None and xmlNode.find("requirements") is None:
# verification
verificationDict[testFileName] = xmlNode
tupleOut = verificationDict, analyticalDict, requirementDict
return tupleOut
def createLatexFile(self, fileName, documentClass = "article", latexPackages=[''], bodyOnly=False):
"""
      This method creates a LaTeX file containing all the information
found in the described tests
@ In, fileName, string, filename (absolute path)
@ In, documentClass, string, latex class document
@ In, latexPackages, list, list of latex packages
@ In, bodyOnly, bool, create a full document or just the document body (\begin{document} to \end{document})
@ Out, None
"""
fileObject = open(fileName,"w+")
if not bodyOnly:
fileObject.write(" \\documentclass{"+documentClass+"}\n")
for packageLatex in latexPackages: fileObject.write(" \\usepackage{"+packageLatex.strip()+"} \n")
fileObject.write(" \\usepackage{hyperref} \n \\usepackage[automark,nouppercase]{scrpage2} \n")
fileObject.write(" \\usepackage[obeyspaces,dvipsnames,svgnames,x11names,table,hyperref]{xcolor} \n")
fileObject.write(" \\usepackage{times} \n \\usepackage[FIGBOTCAP,normal,bf,tight]{subfigure} \n")
fileObject.write(" \\usepackage{amsmath} \n \\usepackage{amssymb} \n")
fileObject.write(" \\usepackage{soul} \n \\usepackage{pifont} \n \\usepackage{enumerate} \n")
fileObject.write(" \\usepackage{listings} \n \\usepackage{fullpage} \n \\usepackage{xcolor} \n")
fileObject.write(" \\usepackage{ifthen} \n \\usepackage{textcomp} \n \\usepackage{mathtools} \n")
fileObject.write(" \\usepackage{relsize} \n \\usepackage{lscape} \n \\usepackage[toc,page]{appendix} \n")
fileObject.write("\n")
fileObject.write(' \\lstdefinestyle{XML} {\n language=XML, \n extendedchars=true, \n breaklines=true, \n breakatwhitespace=true, \n')
fileObject.write(' emphstyle=\color{red}, \n basicstyle=\\ttfamily, \n commentstyle=\\color{gray}\\upshape, \n ')
fileObject.write(' morestring=[b]", \n morecomment=[s]{<?}{?>}, \n morecomment=[s][\color{forestgreen}]{<!--}{-->},')
fileObject.write(' keywordstyle=\\color{cyan}, \n stringstyle=\\ttfamily\color{black}, tagstyle=\color{blue}\\bf \\ttfamily \n }')
fileObject.write(" \\title{RAVEN regression tests' description}\n")
fileObject.write(" \\begin{document} \n \\maketitle \n")
# Introduction
fileObject.write(" \\section{Introduction} \n")
fileObject.write(" This document has been automatically \n")
    fileObject.write(" generated by the script ``\\path{raven/developer_tools/createRegressionTestDocumentation.py}''\n")
    fileObject.write("Currently there are " + str(self.getTotalNumberOfTests()) + "\n")
    fileObject.write(" regression tests in the RAVEN framework. The \% of tests that are documented is currently equal to \n"+ str(self.getDescriptionCoverage())+" \%.\n")
# Documented tests
fileObject.write("\section{Documented Tests}\n")
fileObject.write("Regression tests for the $Python$ RAVEN framework are found in \path{raven/tests/framework}.\n")
    fileObject.write("There is a hierarchy of folders, with tests grouped by the capabilities they exercise.\n")
fileObject.write("Every test is described in a special XML node ($<TestInfo>$) within the $<Simulation>$ block.\n")
fileObject.write("An example is reported below:\n")
fileObject.write("\\begin{lstlisting}[style=XML]\n")
fileObject.write("<Simulation>\n")
fileObject.write(" ...\n")
fileObject.write(" <TestInfo>\n")
fileObject.write(" <name>framework/path/to/test/label</name>\n")
fileObject.write(" <author>AuthorGitLabTag</author>\n")
fileObject.write(" <created>YYYY-MM-DD</created>\n")
fileObject.write(" <classesTested>Module.Class, Module.Class</classesTested>\n")
fileObject.write(" <description>\n")
fileObject.write(" Paragraph describing work-flows, modules, classes, entities, etc.,\n")
fileObject.write(" how they are tested, and any other notes\n")
fileObject.write(" </description>\n")
fileObject.write(" <requirements>RequirementsLabel</requirements>\n")
fileObject.write(" <analytic>paragraph description of analytic test</analytic>\n")
fileObject.write(" ...\n")
fileObject.write(" </TestInfo>\n")
fileObject.write(" ...\n")
fileObject.write("</Simulation>\n")
fileObject.write("\\end{lstlisting}\n")
    fileObject.write("The $<requirements>$ and $<analytic>$ nodes are optional, for those tests that satisfy an NQA design requirement \n")
    fileObject.write("and/or have an analytic solution documented in the analytic tests document. Other notes on block contents:\n")
fileObject.write("\\begin{itemize} \n")
fileObject.write(" \\item \\textbf{$<name>$}: this is the test framework path, as well as the name (label) assigned in the tests file block.")
    fileObject.write(" This is the path and name that show up when running the tests using the testing harness (\\path{run_tests})\n")
fileObject.write(" \\item \\textbf{$<author>$}: this is the GitLab tag of the author who constructed this test originally, i.e. \\textit{alfoa for @alfoa} \n")
fileObject.write(" \\item \\textbf{$<created>$}: this is the date on which the test was originally created, in year-month-day \\textit{YYYY-MM-DD} XSD date format \n")
fileObject.write(" \\item \\textbf{$<classesTested>$}: a list of the classes tested in the python framework, listed as Entity.Class, i.e. \\textit{Samplers.MonteCarlo} \n")
fileObject.write(" \\item \\textbf{$<description>$}: general notes about what work-flows or other methods are tested \n")
fileObject.write(" \\item \\textbf{$<requirements>$} (optional): lists the NQA requirement that this test satisfies \n")
fileObject.write(" \\item \\textbf{$<analytic>$} (optional): describes the analytic nature of this test and how it is documented in the analytic tests documentation \n")
fileObject.write("\\end{itemize} \n")
fileObject.write("An additional node is optionally available to demonstrate significant revisions to a test: \n")
fileObject.write("\\begin{lstlisting}[style=XML,morekeywords={author,date}]\n")
fileObject.write("<Simulation>\n")
fileObject.write(" ...\n")
fileObject.write(" <TestInfo>\n")
fileObject.write(" ...\n")
fileObject.write(" <revisions>\n")
fileObject.write(" <revision author='AuthorGitLabTag' date='YYYY-MM-DD'>paragraph description of revision</revision>\n")
fileObject.write(" <revision author='AuthorGitLabTag' date='YYYY-MM-DD'>paragraph description of revision</revision>\n")
    fileObject.write("    </revisions>\n")
fileObject.write(" ...\n")
fileObject.write(" </TestInfo>\n")
fileObject.write(" ...\n")
fileObject.write("</Simulation>\n")
fileObject.write("\\end{lstlisting}\n")
fileObject.write("The following sub-sections collect all the documented tests. \n")
verificationDict, analyticalDict, requirementDict = self.splitTestDescription()
# list of tests documented
if len(requirementDict.keys()) > 0:
# list the requirement tests
fileObject.write("\subsection{Requirement tests' description}\n")
fileObject.write("\n This section contains the description of all the requirement tests. \n")
for testFileName, xmlNode in requirementDict.items():
latexString, chapterName = self._fromXmlToLatexDocument(xmlNode,testFileName)
fileObject.write("\subsubsection{"+chapterName.strip()+"}\n")
fileObject.write("\n")
fileObject.write(latexString)
if len(analyticalDict.keys()) > 0:
# list the analytical tests
fileObject.write("\subsection{Analytical tests' description}\n")
fileObject.write("\n This section contains the description of all the analytical tests. \n")
for testFileName, xmlNode in analyticalDict.items():
latexString, chapterName = self._fromXmlToLatexDocument(xmlNode,testFileName)
fileObject.write("\subsubsection{"+chapterName.strip()+"}\n")
fileObject.write("\n")
fileObject.write(latexString)
if len(verificationDict.keys()) > 0:
      # list the verification tests
fileObject.write("\subsection{Verification tests' description}\n")
fileObject.write("\n This section contains the description of all the verification tests. \n")
for testFileName, xmlNode in verificationDict.items():
latexString, chapterName = self._fromXmlToLatexDocument(xmlNode,testFileName)
fileObject.write("\subsubsection{"+chapterName.strip()+"}\n")
fileObject.write("\n")
fileObject.write(latexString)
# section regarding undocumented tests
if not self.areAllTestDescribed():
undocumentedFolders = self.getFoldersOfUndocumentedTests()
fileObject.write("\section{Undocumented tests}\n")
      fileObject.write("Currently, there are "+str(len(self.__undescribedFiles))+" undocumented tests:\n")
fileObject.write("\\begin{enumerate}\n")
for folderName in undocumentedFolders:
fileObject.write(" \\item Folder: \\path{"+folderName+"}. Tests: \n")
fileObject.write(" \\begin{itemize}\n")
fileNameWithFolderRoot = [fileName for fileName in self.__undescribedFiles if folderName.strip() == os.path.dirname(fileName)]
for fileName in fileNameWithFolderRoot:
fileLocation = '.'+fileName.replace(self.__userPath,"")
fileObject.write(" \\item \\path{"+fileLocation+"} \n")
fileObject.write(" \\end{itemize}\n")
fileObject.write("\\end{enumerate}\n")
fileObject.write("\end{document}")
fileObject.close()
if __name__ == '__main__':
descriptionClass = testDescription()
noDescriptionFiles, filesWithDescription = descriptionClass.noDescriptionTestsAndInformationOnTheOther()
if not descriptionClass.areAllTestDescribed():
print("There are "+str(len(noDescriptionFiles))+" test files without a test description.")
print("Files without test description are:")
for fileName in noDescriptionFiles: print(fileName)
totFile = descriptionClass.getTotalNumberOfTests()
totFileWithDescription = len(filesWithDescription.keys())
print("\nTotal framework test files are : "+str(totFile))
  print("\n% of tests that are documented is : "+str(descriptionClass.getDescriptionCoverage())+" %")
print("\nFolders that contain undocumented tests are:\n")
for folderName in descriptionClass.getFoldersOfUndocumentedTests(): print(folderName)
descriptionClass.createLatexFile("regression_tests_documentation_body.tex",bodyOnly=True)
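# Illustrative note (not executed): the call above generates only the document
# body (bodyOnly=True), presumably to be included in a master LaTeX document.
# A standalone, compilable file can be produced instead, e.g.:
#
#   descriptionClass.createLatexFile("regression_tests_documentation.tex",
#                                    documentClass="report",
#                                    latexPackages=["geometry"],
#                                    bodyOnly=False)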
|
|
import types
import threading
from django import http
import amfast
from amfast.remoting import Packet
import amfast.remoting.flex_messages as messaging
from amfast.remoting.channel import HttpChannel, ChannelError
def django_response_wrapper(func):
    '''
    A decorator which wraps a bare response packet in a Django HttpResponse.
    '''
def _(channel, django_request):
response_packet = func(channel, django_request)
if response_packet is None:
return http.HttpResponse(mimetype = channel.CONTENT_TYPE)
elif type(response_packet) is types.GeneratorType:
http_response = http.HttpResponse(content=response_packet, mimetype=channel.CONTENT_TYPE)
return http_response
else:
raise ChannelError('Invalid response type.')
return _
class DjangoChannel(HttpChannel):
"""A channel that works with Django."""
# Attribute that holds Django's
# request object, so that it can
# be accessed from a target.
DJANGO_REQUEST = '_django_request'
def __call__(self, http_request):
if http_request.method != 'POST':
return http.HttpResponseNotAllowed(['POST'])
try:
request_packet = self.decode(http_request.raw_post_data)
setattr(request_packet, self.DJANGO_REQUEST, http_request)
except amfast.AmFastError, exc:
return http.HttpResponseBadRequest(mimetype='text/plain', content=self.getBadEncodingMsg())
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
return http.HttpResponseServerError(mimetype='text/plain', content=self.getBadServerMsg())
try:
response_packet = self.invoke(request_packet)
raw_response = self.encode(response_packet)
http_response = http.HttpResponse(mimetype=self.CONTENT_TYPE)
http_response['Content-Length'] = str(len(raw_response))
http_response.write(raw_response)
return http_response
except amfast.AmFastError, exc:
return http.HttpResponseServerError(mimetype='text/plain', content=self.getBadServerMsg())
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
return http.HttpResponseServerError(mimetype='text/plain', content=self.getBadServerMsg())
class StreamingDjangoChannel(DjangoChannel):
"""Experimental support for streaming with Django."""
def __init__(self, name, max_connections=-1, endpoint=None,
wait_interval=0, heart_interval=30000):
DjangoChannel.__init__(self, name, max_connections=max_connections,
endpoint=endpoint, wait_interval=wait_interval)
self.heart_interval = heart_interval
def __call__(self, http_request):
if http_request.META['CONTENT_TYPE'] == self.CONTENT_TYPE:
return DjangoChannel.__call__(self, http_request)
try:
body = http_request.raw_post_data
msg = messaging.StreamingMessage()
msg.parseBody(body)
            # Django's http_request object wraps the WSGI environment, including the query string
msg.parseParams(http_request.META['QUERY_STRING'])
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
if msg.operation == msg.OPEN_COMMAND:
return self.startStream(msg)
elif msg.operation == msg.CLOSE_COMMAND:
return self.stopStream(msg)
raise ChannelError('Http streaming operation unknown: %s' % msg.operation)
@django_response_wrapper
def startStream(self, msg):
try:
connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
raise ChannelError('Http streaming operation unknown: %s' % msg.operation)
try:
timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
timer.daemon = True
timer.start()
inited = False
event = threading.Event()
connection.setNotifyFunc(event.set)
poll_secs = float(self.poll_interval) / 1000
while True:
if connection.connected is False:
msg = messaging.StreamingMessage.getDisconnectMsg()
try:
yield messaging.StreamingMessage.prepareMsg(msg, self.endpoint)
finally:
# Client may have already disconnected
return
if inited is False:
# Send acknowledge message
response = msg.acknowledge()
response.body = connection.id
bytes = messaging.StreamingMessage.prepareMsg(response, self.endpoint)
inited = True
bytes += chr(messaging.StreamingMessage.NULL_BYTE) * self.KICKSTART_BYTES
yield bytes
if self.channel_set.notify_connections is True:
# Block until notification of new message
event.wait()
else:
# Block until poll_interval is reached
event.wait(poll_secs)
# Message has been published,
# or it's time for a heart beat
# Remove notify_func so that
# New messages don't trigger event.
connection.unSetNotifyFunc()
msgs = self.channel_set.subscription_manager.pollConnection(connection)
if len(msgs) > 0:
while len(msgs) > 0:
# Dispatch all messages to client
for msg in msgs:
try:
bytes = messaging.StreamingMessage.prepareMsg(msg, self.endpoint)
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
self.channel_set.disconnect(connection)
break
try:
yield bytes
# return bytes
except (KeyboardInterrupt, SystemExit):
raise
except:
# Client has disconnected
self.channel_set.disconnect(connection)
return
msgs = self.channel_set.subscription_manager.pollConnection(connection)
else:
# Send heart beat
try:
yield chr(messaging.StreamingMessage.NULL_BYTE)
except (KeyboardInterrupt, SystemExit):
raise
except:
# Client has disconnected
self.channel_set.disconnect(connection)
return
# Create new event to trigger new messages or heart beats
event = threading.Event()
connection.setNotifyFunc(event.set)
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
self.channel_set.disconnect(connection)
return
@django_response_wrapper
def stopStream(self, msg):
"""Stop a streaming connection."""
connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
connection.disconnect()
if hasattr(connection, "notify_func") and connection.notify_func is not None:
connection.notify_func()
@django_response_wrapper
def beat(self, connection):
"""Send a heart beat."""
if hasattr(connection, "notify_func") and connection.notify_func is not None:
connection.notify_func()
else:
return
# Create timer for next beat
timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
timer.daemon = True
timer.start()
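# Illustrative wiring sketch (hypothetical names; it assumes a ChannelSet and
# service mappings are configured elsewhere with amfast's API).  Because a
# DjangoChannel instance is callable with the Django request, it can be exposed
# directly as a view, e.g. in urls.py:
#
#   amf_channel = DjangoChannel('amf')
#   # urlpatterns += [ url(r'^amf/$', amf_channel) ]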
|
|
import pickle
import json
import pandas as pd
import numpy as np
from .store import exists_key_store, get_key_store
from .dataset import get_dataset_list, get_dataset_folder, get_dataset
from .prepare import get_idx_train_test, get_eval_sets
def get_importance(dataset_id, round_id):
"""
features importance of the model
:param dataset_id: id of the dataset
:param round_id: id of the round
:return: feature importance as a dataframe
"""
try:
return pickle.load(open(get_dataset_folder(dataset_id) + '/features/%s.pkl' % round_id, 'rb'))
    except Exception:
return None
def get_pred_eval_test(dataset_id, round_id):
"""
prediction on eval set & test & submit set
:param dataset_id: id of the dataset
:param round_id: id of the round
:return: list of predictions for eval set, test and submit set
"""
return pickle.load(open(get_dataset_folder(dataset_id) + '/predict/%s.pkl' % round_id, 'rb'))
def print_value(x):
# easy print function for dictionary value
return ('%6.4f' % x).rstrip('0').rstrip('.') if isinstance(x, float) else str(x)
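# Worked examples of the formatting above (illustrative):
#   print_value(2.0)    -> '2'        (trailing zeros and dot stripped)
#   print_value(0.5)    -> '0.5'
#   print_value(0.1234) -> '0.1234'
#   print_value(7)      -> '7'        (non-floats fall back to str())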
def get_home_best():
# get the list of datasets with their best results
datasets = get_dataset_list(include_results=True)[::-1]
for dt in datasets:
if dt.status != 'created':
best = get_best_models(dt.dataset_id)
if len(best) > 0:
best = best[0]
dt.best_round_id = best['round_id']
dt.best_model_name = best['model_name']
dt.best_score_eval = best['score_eval']
dt.best_score_test = best['score_test']
dt.best_cv_mean = best['cv_mean']
dt.best_cv_std = best['cv_std']
dt.best_cv_max = best['cv_max']
return datasets
def get_best_models(dataset_id):
# get the best results per model
key = 'dataset:%s:best' % dataset_id
if exists_key_store(key):
return get_key_store(key)
else:
return []
def get_best_pp(dataset_id):
# get the best results per pre-processing
key = 'dataset:%s:best_pp' % dataset_id
if exists_key_store(key):
return get_key_store(key)
else:
return []
def get_best_details(df, model_name):
# get the best results for a model
if len(df[(df.model_name == model_name) & df.cv]) > 0:
best = df[(df.model_name == model_name) & df.cv].sort_values(by='cv_mean')
else:
best = df[df.model_name == model_name].sort_values(by='cv_mean')
# create params detailed dataframe
params = []
for p, round_id in zip(best.model_params.values, best.round_id.values):
params.append({**{'round_id': round_id}, **p})
params = pd.DataFrame(params)
if len(params) > 1:
to_drop = []
# remove cols with 1 unique value
for col in params.columns:
l = params[col].map(str).unique()
if len(l) <= 1:
to_drop.append(col)
if len(to_drop) > 0:
params.drop(to_drop, axis=1, inplace=True)
# strip underscores in column names
new_col = []
for col in params.columns:
if col != 'round_id':
new_col.append(col.replace('_', ' '))
else:
new_col.append(col)
params.columns = new_col
# round floating values
for col in params.columns:
if col != 'round_id':
params[col] = params[col].fillna('').map(print_value)
# params.fillna('', inplace=True)
best = pd.merge(best, params, on='round_id')
# relative performance
best['rel_score'] = abs(100 * (best['cv_max'] - best['cv_max'].max()) / (best['cv_max'].max() - best['cv_max'].min()))
return [col for col in params.columns if col != 'round_id'], best
def __select_process(process, pipeline):
    # select the element in the pipeline matching the given process name
for p in pipeline:
if p[2] == process:
return p
return '', '', '', ''
def get_best_details_pp(df0, process_name):
    # get the best results for a pre-processing step
    if len(df0[df0.cv]) > 0:
        df = df0[df0.cv].copy()
    else:
        df = df0.copy()
df['is_selected'] = df.pipeline.map(lambda x: __select_process(process_name, x)[2] != '')
df = df[df.is_selected]
df['cat_ref'] = df['pipeline'].map(lambda x: __select_process(process_name, x)[0])
df['cat_name'] = df['pipeline'].map(lambda x: __select_process(process_name, x)[1])
df['cat_process'] = df['pipeline'].map(lambda x: __select_process(process_name, x)[2])
df['cat_params'] = df['pipeline'].map(lambda x: __select_process(process_name, x)[3])
best = df.sort_values(by='cv_max')
# create params detailed dataframe
params = []
for p, round_id in zip(best.cat_params.values, best.round_id.values):
params.append({**{'round_id': round_id}, **p})
params = pd.DataFrame(params)
if len(params) > 1:
to_drop = []
# remove cols with 1 unique value
for col in params.columns:
l = params[col].map(str).unique()
if len(l) <= 1:
to_drop.append(col)
if len(to_drop) > 0:
params.drop(to_drop, axis=1, inplace=True)
# strip underscores in column names
new_col = []
for col in params.columns:
if col != 'round_id':
new_col.append(col.replace('_', ' '))
else:
new_col.append(col)
params.columns = new_col
# round floating values
for col in params.columns:
if col != 'round_id':
params[col] = params[col].fillna('').map(print_value)
# params.fillna('', inplace=True)
best = pd.merge(best, params, on='round_id')
# relative performance
best['rel_score'] = abs(100 * (best['cv_max'] - best['cv_max'].max()) / (best['cv_max'].max() - best['cv_max'].min()))
return [col for col in params.columns if col != 'round_id'], best
def get_data_steps(process):
# generate a list of process steps from the json description
steps = []
if isinstance(process, dict):
for step_name in process.keys():
params = process[step_name]
steps.append((step_name, [(key, params[key]) for key in params.keys()]))
return steps
def get_feature_steps(process_name, params):
    # generate a (process name, parameters) tuple from the json description
return (process_name, [(key, params[key]) for key in params.keys()])
def get_round_params(df, round_id):
# details for a round
round = df[df.round_id == int(round_id)]
params = round.model_params.values[0].copy()
for key in params.keys():
params[key] = print_value(params[key])
return params
def get_feature_importance(dataset_id, round_id):
# get feature importance for the selected model round
df = get_importance(dataset_id, round_id)
if not isinstance(df, pd.DataFrame) or 'importance' not in df.columns:
return []
df['pct_importance'] = np.round(100 * df.importance / df.importance.sum(), 1)
df['rel_importance'] = np.round(100 * df.importance / df.importance.max(), 1)
return df.sort_values('importance', ascending=False).to_dict(orient='records')
def create_predict_file(dataset_id, round_id):
"""
generate a prediction file in excel format on eval set, with original data and prediction
:param dataset_id: dataset id
:param round_id: round id
:return: None
"""
dataset = get_dataset(dataset_id)
# load original train data and indexes
df = dataset.get_data()
i_train, i_test = get_idx_train_test(dataset_id)
df = df.iloc[i_train]
ds = get_eval_sets(dataset_id)
# load prediction results
y_pred_eval, y_pred_test, y_pred_submit = get_pred_eval_test(dataset_id, round_id)
cols = list(df.columns)
if dataset.problem_type == 'regression':
df['_predict'] = y_pred_eval
df['_actual'] = ds.y_train
df['_delta'] = df['_actual'] - df['_predict']
df = df[['_predict', '_actual', '_delta'] + cols]
else:
df['_predict'] = np.argmax(y_pred_eval, axis=1)
df['_p_class'] = df['_predict'].map(lambda x: dataset.y_class_names[x])
df['_actual'] = ds.y_train
df['_a_class'] = df['_actual'].map(lambda x: dataset.y_class_names[x])
df['_delta'] = df['_actual'] != df['_predict']
df = df[['_predict', '_actual', '_delta', '_p_class', '_a_class'] + cols]
# save as excel file
filename = get_dataset_folder(dataset_id) + '/submit/predict_%s.xlsx' % round_id
df.to_excel(filename, index=False)
return filename
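# Illustrative usage sketch (the dataset id below is hypothetical and assumes a
# dataset with completed rounds already exists in the store):
#
#   best = get_best_models('my_dataset')                # best result per model
#   if best:
#       imp = get_feature_importance('my_dataset', best[0]['round_id'])
#       xlsx = create_predict_file('my_dataset', best[0]['round_id'])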
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1.types import index_endpoint
from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1.types import index_endpoint_service
from google.longrunning import operations_pb2 # type: ignore
from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import IndexEndpointServiceGrpcTransport
class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport):
"""gRPC AsyncIO backend transport for IndexEndpointService.
A service for managing Vertex AI's IndexEndpoints.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.CreateIndexEndpointRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create index endpoint method over gRPC.
Creates an IndexEndpoint.
Returns:
Callable[[~.CreateIndexEndpointRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_index_endpoint" not in self._stubs:
self._stubs["create_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/CreateIndexEndpoint",
request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_index_endpoint"]
@property
def get_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.GetIndexEndpointRequest],
Awaitable[index_endpoint.IndexEndpoint],
]:
r"""Return a callable for the get index endpoint method over gRPC.
Gets an IndexEndpoint.
Returns:
Callable[[~.GetIndexEndpointRequest],
Awaitable[~.IndexEndpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_index_endpoint" not in self._stubs:
self._stubs["get_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/GetIndexEndpoint",
request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize,
response_deserializer=index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["get_index_endpoint"]
@property
def list_index_endpoints(
self,
) -> Callable[
[index_endpoint_service.ListIndexEndpointsRequest],
Awaitable[index_endpoint_service.ListIndexEndpointsResponse],
]:
r"""Return a callable for the list index endpoints method over gRPC.
Lists IndexEndpoints in a Location.
Returns:
Callable[[~.ListIndexEndpointsRequest],
Awaitable[~.ListIndexEndpointsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_index_endpoints" not in self._stubs:
self._stubs["list_index_endpoints"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/ListIndexEndpoints",
request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize,
response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize,
)
return self._stubs["list_index_endpoints"]
@property
def update_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.UpdateIndexEndpointRequest],
Awaitable[gca_index_endpoint.IndexEndpoint],
]:
r"""Return a callable for the update index endpoint method over gRPC.
Updates an IndexEndpoint.
Returns:
Callable[[~.UpdateIndexEndpointRequest],
Awaitable[~.IndexEndpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_index_endpoint" not in self._stubs:
self._stubs["update_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/UpdateIndexEndpoint",
request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize,
response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["update_index_endpoint"]
@property
def delete_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.DeleteIndexEndpointRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete index endpoint method over gRPC.
Deletes an IndexEndpoint.
Returns:
Callable[[~.DeleteIndexEndpointRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_index_endpoint" not in self._stubs:
self._stubs["delete_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/DeleteIndexEndpoint",
request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_index_endpoint"]
@property
def deploy_index(
self,
) -> Callable[
[index_endpoint_service.DeployIndexRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the deploy index method over gRPC.
Deploys an Index into this IndexEndpoint, creating a
DeployedIndex within it.
Only non-empty Indexes can be deployed.
Returns:
Callable[[~.DeployIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "deploy_index" not in self._stubs:
self._stubs["deploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/DeployIndex",
request_serializer=index_endpoint_service.DeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_index"]
@property
def undeploy_index(
self,
) -> Callable[
[index_endpoint_service.UndeployIndexRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the undeploy index method over gRPC.
Undeploys an Index from an IndexEndpoint, removing a
DeployedIndex from it, and freeing all resources it's
using.
Returns:
Callable[[~.UndeployIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "undeploy_index" not in self._stubs:
self._stubs["undeploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/UndeployIndex",
request_serializer=index_endpoint_service.UndeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_index"]
@property
def mutate_deployed_index(
self,
) -> Callable[
[index_endpoint_service.MutateDeployedIndexRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the mutate deployed index method over gRPC.
Update an existing DeployedIndex under an
IndexEndpoint.
Returns:
Callable[[~.MutateDeployedIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_deployed_index" not in self._stubs:
self._stubs["mutate_deployed_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/MutateDeployedIndex",
request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["mutate_deployed_index"]
def close(self):
return self.grpc_channel.close()
__all__ = ("IndexEndpointServiceGrpcAsyncIOTransport",)
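# Illustrative usage sketch (the async client import below reflects the usual
# layout of the generated package and is an assumption, not part of this
# module):
#
#   from google.cloud.aiplatform_v1 import IndexEndpointServiceAsyncClient
#
#   transport = IndexEndpointServiceGrpcAsyncIOTransport(
#       host="aiplatform.googleapis.com",
#   )
#   client = IndexEndpointServiceAsyncClient(transport=transport)
#   # response = await client.list_index_endpoints(parent=...)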
|
|
import numpy as np
from PySide import QtGui, QtCore
from PySide.QtOpenGL import *
import sharppy.sharptab as tab
from sharppy.sharptab.constants import *
## routine written by Kelton Halbert - OU School of Meteorology
## [email protected]
__all__ = ['backgroundThetae', 'plotThetae']
class backgroundThetae(QtGui.QFrame):
'''
Draw the background frame and lines for the Theta-E plot.
Draws the background on a QPixmap.
Inherits a QtGui.QFrame Object
'''
def __init__(self):
super(backgroundThetae, self).__init__()
self.initUI()
def initUI(self):
'''
Initializes window variables and the QPixmap
that gets drawn on.
'''
## window configuration settings,
        ## such as padding, width, height, and
## min/max plot axes
self.lpad = 0; self.rpad = 0
self.tpad = 0; self.bpad = 20
self.wid = self.size().width() - self.rpad
self.hgt = self.size().height() - self.bpad
self.tlx = self.rpad; self.tly = self.tpad
self.brx = self.wid; self.bry = self.hgt
## what are the minimum/maximum values expected
## for the data? This is used when converting
## to pixel coordinates.
self.pmax = 1025.; self.pmin = 400.
self.tmax = 360.; self.tmin = 300.
## do a DPI check for the font size
if self.physicalDpiX() > 75:
fsize = 6
else:
fsize = 7
self.label_font = QtGui.QFont('Helvetica', fsize)
## initialize the QPixmap
self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
self.clear()
## and draw the background
self.plotBackground()
def resizeEvent(self, e):
'''
Handles the event the window is resized.
Parameters
----------
e: an Event object
'''
self.initUI()
def plotBackground(self):
'''
Handles the drawing of the background onto
the QPixmap.
'''
## initialize a painter object and draw the frame
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
self.draw_frame(qp)
## draw the isobar ticks and the theta-e ticks
for p in [1000, 900, 800, 700, 600, 500]:
self.draw_isobar(p, qp)
for t in np.arange( 200, 400, 10):
self.draw_thetae(t, qp)
qp.end()
def clear(self):
'''
Clear the widget
'''
self.plotBitMap.fill(QtCore.Qt.black)
def draw_frame(self, qp):
'''
Draw the background frame.
Parameters
----------
qp: QtGui.QPainter object
'''
## set a new pen to draw with
pen = QtGui.QPen(QtCore.Qt.white, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
## draw the borders in white
qp.drawLine(self.tlx, self.tly, self.brx, self.tly)
qp.drawLine(self.brx, self.tly, self.brx, self.bry)
qp.drawLine(self.brx, self.bry, self.tlx, self.bry)
qp.drawLine(self.tlx, self.bry, self.tlx, self.tly)
qp.setFont(self.label_font)
## draw the plot name on the background
qp.drawText(35, 15, 50, 50,
QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter,
'Theta-E\nv.\nPres')
def draw_isobar(self, p, qp):
'''
Draw background isobar ticks.
Parameters
----------
p: pressure in hPa or mb
qp: QtGui.QPainter object
'''
## set a new pen with a white color and solid line of thickness 1
pen = QtGui.QPen(QtGui.QColor(WHITE), 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setFont(self.label_font)
## convert the pressure to pixel coordinates
y1 = self.pres_to_pix(p)
## length of line to draw
offset = 5
## draw the isobar line and text
qp.drawLine(self.lpad, y1, self.lpad+offset, y1)
qp.drawLine(self.brx+self.rpad-offset, y1,
self.brx+self.rpad, y1)
qp.drawText(0, y1-20, 20, 40,
QtCore.Qt.AlignVCenter | QtCore.Qt.AlignRight,
tab.utils.INT2STR(p))
def draw_thetae(self, t, qp):
'''
Draw background Theta-E ticks.
Parameters
----------
t: Theta-E in degrees Kelvin
qp: QtGui.QPainter object
'''
## set a new pen with a white color, thickness one, solid line
pen = QtGui.QPen(QtGui.QColor(WHITE), 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setFont(self.label_font)
## convert theta-E to pixel values
x1 = self.theta_to_pix(t)
## length of tick to draw
offset = 5
## draw the tick and label it with a value
qp.drawLine(x1, 0, x1, 0+offset)
qp.drawLine(x1, self.bry+self.tpad-offset,
x1, self.bry+self.rpad)
qp.drawText(x1, self.bry-20, 15, 20,
QtCore.Qt.AlignTop | QtCore.Qt.AlignCenter, tab.utils.INT2STR(t))
def pres_to_pix(self, p):
'''
Function to convert a pressure value (hPa) to a Y pixel.
Parameters
----------
p: pressure in hPa or mb
'''
scl1 = self.pmax - self.pmin
scl2 = self.pmax - p
return self.bry - (scl2 / scl1) * (self.bry - self.tpad)
def theta_to_pix(self, t):
'''
Function to convert a Theta-E value (K) to a X pixel.
Parameters
----------
t: temperature in Kelvin
'''
scl1 = self.tmax - self.tmin
scl2 = self.tmax - t
return self.bry - (scl2 / scl1) * (self.bry - self.rpad)
class plotThetae(backgroundThetae):
'''
Draws the theta-E window. Inherits from the backgroundThetae
class that handles plotting of the frame. Draws the contours
to the QPixmap inherited by the backgroundThetae class.
'''
def __init__(self):
'''
Initializes the widget. The Profile data used for
plotting is supplied later via setProf().
'''
super(plotThetae, self).__init__()
## set the variables for pressure and theta-e
self.prof = None
def setProf(self, prof):
self.prof = prof
self.thetae = prof.thetae
self.pres = prof.pres
idx = np.where( self.pres > 400. )[0]
self.tmin = self.thetae[idx].min() - 10.
self.tmax = self.thetae[idx].max() + 10.
self.clear()
self.plotBackground()
self.plotData()
self.update()
def resizeEvent(self, e):
'''
Handles when the window is resized.
Parameters
----------
e: an Event object
'''
super(plotThetae, self).resizeEvent(e)
if self.prof is not None:
idx = np.where( self.pres > 400. )[0]
self.tmin = self.thetae[idx].min() - 10.
self.tmax = self.thetae[idx].max() + 10.
self.update()
self.plotData()
def paintEvent(self, e):
'''
Draws the QPixmap onto the QWidget.
Parameters
----------
e: an Event object
'''
super(plotThetae, self).paintEvent(e)
qp = QtGui.QPainter()
qp.begin(self)
qp.drawPixmap(0, 0, self.plotBitMap)
qp.end()
def plotData(self):
'''
Plots the data onto the QPixmap.
'''
if self.prof is None:
return
## this function handles painting the plot
## create a new painter object
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
## draw the theta-e profile
self.draw_profile(qp)
## end the painter
qp.end()
def draw_profile(self, qp):
'''
Draw the Theta-E v. Pres profile.
Parameters
----------
qp: QtGui.QPainter object
'''
pen = QtGui.QPen(QtGui.QColor(RED), 2)
pen.setStyle(QtCore.Qt.SolidLine)
mask1 = self.thetae.mask
mask2 = self.pres.mask
mask = np.maximum(mask1, mask2)
pres = self.pres[~mask]
thetae = self.thetae[~mask]
for i in xrange( pres.shape[0] - 1 ):
## only plot the data where the pressure is greater than 400 hPa
if pres[i] > 400:
## get two pressure, temperature, and dewpoint values
p1 = pres[i]; p2 = pres[i+1]
## get two theta-e values from the above sounding profile data
thte1 = thetae[i]; thte2 = thetae[i+1]
## convert the theta-e values to x pixel coordinates
## and the pressure values to y pixel coordinates
x1 = self.theta_to_pix(thte1); x2 = self.theta_to_pix(thte2)
y1 = self.pres_to_pix(p1); y2 = self.pres_to_pix(p2)
## set the pen and draw a line between the two points
qp.setPen(pen)
qp.drawLine(x1, y1, x2, y2)
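# A Qt-free sketch of the linear pressure-to-pixel mapping implemented by pres_to_pix
# above, using the same plot bounds (pmin=400 hPa, pmax=1025 hPa). The frame size used
# here is illustrative, not taken from a live widget.
def _pres_to_pix_sketch(p, pmin=400., pmax=1025., tpad=0, bry=380):
    scl1 = pmax - pmin
    scl2 = pmax - p
    return bry - (scl2 / scl1) * (bry - tpad)
# _pres_to_pix_sketch(1025.) -> 380 (bottom edge of the frame)
# _pres_to_pix_sketch(400.)  -> 0   (top of the plotted range)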
|
|
import numpy as np
import time
import gym
import queue
import ray
from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.execution.concurrency_ops import Concurrently, Enqueue, Dequeue
from ray.rllib.execution.metric_ops import StandardMetricsReporting
from ray.rllib.execution.replay_ops import StoreToReplayBuffer, Replay
from ray.rllib.execution.rollout_ops import ParallelRollouts, AsyncGradients, \
ConcatBatches, StandardizeFields
from ray.rllib.execution.train_ops import TrainOneStep, ComputeGradients, \
AverageGradients
from ray.rllib.execution.replay_buffer import LocalReplayBuffer, \
ReplayActor
from ray.rllib.policy.sample_batch import SampleBatch
from ray.util.iter import LocalIterator, from_range
from ray.util.iter_metrics import SharedMetrics
def iter_list(values):
return LocalIterator(lambda _: values, SharedMetrics())
def make_workers(n):
local = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=PPOTFPolicy,
rollout_fragment_length=100)
remotes = [
RolloutWorker.as_remote().remote(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=PPOTFPolicy,
rollout_fragment_length=100) for _ in range(n)
]
workers = WorkerSet._from_existing(local, remotes)
return workers
def test_concurrently(ray_start_regular_shared):
a = iter_list([1, 2, 3])
b = iter_list([4, 5, 6])
c = Concurrently([a, b], mode="round_robin")
assert c.take(6) == [1, 4, 2, 5, 3, 6]
a = iter_list([1, 2, 3])
b = iter_list([4, 5, 6])
c = Concurrently([a, b], mode="async")
assert c.take(6) == [1, 4, 2, 5, 3, 6]
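# A pure-Python sketch (no Ray) of the round-robin interleaving that the asserts above
# check: items are drawn alternately from each source iterator until all are exhausted.
# The helper name is illustrative and not part of RLlib.
def _round_robin_sketch(*iterables):
    iterators = [iter(it) for it in iterables]
    while iterators:
        for it in list(iterators):
            try:
                yield next(it)
            except StopIteration:
                iterators.remove(it)
# list(_round_robin_sketch([1, 2, 3], [4, 5, 6])) == [1, 4, 2, 5, 3, 6]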
def test_concurrently_weighted(ray_start_regular_shared):
a = iter_list([1, 1, 1])
b = iter_list([2, 2, 2])
c = iter_list([3, 3, 3])
c = Concurrently(
[a, b, c], mode="round_robin", round_robin_weights=[3, 1, 2])
assert c.take(9) == [1, 1, 1, 2, 3, 3, 2, 3, 2]
a = iter_list([1, 1, 1])
b = iter_list([2, 2, 2])
c = iter_list([3, 3, 3])
c = Concurrently(
[a, b, c], mode="round_robin", round_robin_weights=[1, 1, "*"])
assert c.take(9) == [1, 2, 3, 3, 3, 1, 2, 1, 2]
def test_concurrently_output(ray_start_regular_shared):
a = iter_list([1, 2, 3])
b = iter_list([4, 5, 6])
c = Concurrently([a, b], mode="round_robin", output_indexes=[1])
assert c.take(6) == [4, 5, 6]
a = iter_list([1, 2, 3])
b = iter_list([4, 5, 6])
c = Concurrently([a, b], mode="round_robin", output_indexes=[0, 1])
assert c.take(6) == [1, 4, 2, 5, 3, 6]
def test_enqueue_dequeue(ray_start_regular_shared):
a = iter_list([1, 2, 3])
q = queue.Queue(100)
a.for_each(Enqueue(q)).take(3)
assert q.qsize() == 3
assert q.get_nowait() == 1
assert q.get_nowait() == 2
assert q.get_nowait() == 3
q.put("a")
q.put("b")
q.put("c")
a = Dequeue(q)
assert a.take(3) == ["a", "b", "c"]
def test_metrics(ray_start_regular_shared):
workers = make_workers(1)
workers.foreach_worker(lambda w: w.sample())
a = from_range(10, repeat=True).gather_sync()
b = StandardMetricsReporting(
a, workers, {
"min_iter_time_s": 2.5,
"timesteps_per_iteration": 0,
"metrics_smoothing_episodes": 10,
"collect_metrics_timeout": 10,
})
start = time.time()
res1 = next(b)
assert res1["episode_reward_mean"] > 0, res1
res2 = next(b)
assert res2["episode_reward_mean"] > 0, res2
assert time.time() - start > 2.4
workers.stop()
def test_rollouts(ray_start_regular_shared):
workers = make_workers(2)
a = ParallelRollouts(workers, mode="bulk_sync")
assert next(a).count == 200
counters = a.shared_metrics.get().counters
assert counters["num_steps_sampled"] == 200, counters
a = ParallelRollouts(workers, mode="async")
assert next(a).count == 100
counters = a.shared_metrics.get().counters
assert counters["num_steps_sampled"] == 100, counters
workers.stop()
def test_rollouts_local(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
assert next(a).count == 100
counters = a.shared_metrics.get().counters
assert counters["num_steps_sampled"] == 100, counters
workers.stop()
def test_concat_batches(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="async")
b = a.combine(ConcatBatches(1000))
assert next(b).count == 1000
timers = b.shared_metrics.get().timers
assert "sample" in timers
def test_standardize(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="async")
b = a.for_each(StandardizeFields(["t"]))
batch = next(b)
assert abs(np.mean(batch["t"])) < 0.001, batch
assert abs(np.std(batch["t"]) - 1.0) < 0.001, batch
def test_async_grads(ray_start_regular_shared):
workers = make_workers(2)
a = AsyncGradients(workers)
res1 = next(a)
assert isinstance(res1, tuple) and len(res1) == 2, res1
counters = a.shared_metrics.get().counters
assert counters["num_steps_sampled"] == 100, counters
workers.stop()
def test_train_one_step(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(TrainOneStep(workers))
batch, stats = next(b)
assert isinstance(batch, SampleBatch)
assert "default_policy" in stats
assert "learner_stats" in stats["default_policy"]
counters = a.shared_metrics.get().counters
assert counters["num_steps_sampled"] == 100, counters
assert counters["num_steps_trained"] == 100, counters
timers = a.shared_metrics.get().timers
assert "learn" in timers
workers.stop()
def test_compute_gradients(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(ComputeGradients(workers))
grads, counts = next(b)
assert counts == 100, counts
timers = a.shared_metrics.get().timers
assert "compute_grads" in timers
def test_avg_gradients(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(ComputeGradients(workers)).batch(4)
c = b.for_each(AverageGradients())
grads, counts = next(c)
assert counts == 400, counts
def test_store_to_replay_local(ray_start_regular_shared):
buf = LocalReplayBuffer(
num_shards=1,
learning_starts=200,
buffer_size=1000,
replay_batch_size=100,
prioritized_replay_alpha=0.6,
prioritized_replay_beta=0.4,
prioritized_replay_eps=0.0001)
assert buf.replay() is None
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(StoreToReplayBuffer(local_buffer=buf))
next(b)
assert buf.replay() is None # learning hasn't started yet
next(b)
assert buf.replay().count == 100
replay_op = Replay(local_buffer=buf)
assert next(replay_op).count == 100
def test_store_to_replay_actor(ray_start_regular_shared):
actor = ReplayActor.remote(
num_shards=1,
learning_starts=200,
buffer_size=1000,
replay_batch_size=100,
prioritized_replay_alpha=0.6,
prioritized_replay_beta=0.4,
prioritized_replay_eps=0.0001)
assert ray.get(actor.replay.remote()) is None
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(StoreToReplayBuffer(actors=[actor]))
next(b)
assert ray.get(actor.replay.remote()) is None # learning hasn't started
next(b)
assert ray.get(actor.replay.remote()).count == 100
replay_op = Replay(actors=[actor])
assert next(replay_op).count == 100
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
|
import itertools
import weakref
import six
from mongoengine.common import _import_class
from mongoengine.errors import DoesNotExist, MultipleObjectsReturned
__all__ = ('BaseDict', 'BaseList', 'EmbeddedDocumentList')
class BaseDict(dict):
"""A special dict so we can watch any changes."""
_dereferenced = False
_instance = None
_name = None
def __init__(self, dict_items, instance, name):
Document = _import_class('Document')
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(instance, (Document, EmbeddedDocument)):
self._instance = weakref.proxy(instance)
self._name = name
super(BaseDict, self).__init__(dict_items)
def __getitem__(self, key, *args, **kwargs):
value = super(BaseDict, self).__getitem__(key)
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(value, EmbeddedDocument) and value._instance is None:
value._instance = self._instance
elif not isinstance(value, BaseDict) and isinstance(value, dict):
value = BaseDict(value, None, '%s.%s' % (self._name, key))
super(BaseDict, self).__setitem__(key, value)
value._instance = self._instance
elif not isinstance(value, BaseList) and isinstance(value, list):
value = BaseList(value, None, '%s.%s' % (self._name, key))
super(BaseDict, self).__setitem__(key, value)
value._instance = self._instance
return value
def __setitem__(self, key, value, *args, **kwargs):
self._mark_as_changed(key)
return super(BaseDict, self).__setitem__(key, value)
def __delete__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__delete__(*args, **kwargs)
def __delitem__(self, key, *args, **kwargs):
self._mark_as_changed(key)
return super(BaseDict, self).__delitem__(key)
def __delattr__(self, key, *args, **kwargs):
self._mark_as_changed(key)
return super(BaseDict, self).__delattr__(key)
def __getstate__(self):
self.instance = None
self._dereferenced = False
return self
def __setstate__(self, state):
self = state
return self
def clear(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).clear()
def pop(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).pop(*args, **kwargs)
def popitem(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).popitem()
def setdefault(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).setdefault(*args, **kwargs)
def update(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).update(*args, **kwargs)
def _mark_as_changed(self, key=None):
if hasattr(self._instance, '_mark_as_changed'):
if key:
self._instance._mark_as_changed('%s.%s' % (self._name, key))
else:
self._instance._mark_as_changed(self._name)
class BaseList(list):
"""A special list so we can watch any changes."""
_dereferenced = False
_instance = None
_name = None
def __init__(self, list_items, instance, name):
Document = _import_class('Document')
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(instance, (Document, EmbeddedDocument)):
self._instance = weakref.proxy(instance)
self._name = name
super(BaseList, self).__init__(list_items)
def __getitem__(self, key, *args, **kwargs):
value = super(BaseList, self).__getitem__(key)
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(value, EmbeddedDocument) and value._instance is None:
value._instance = self._instance
elif not isinstance(value, BaseDict) and isinstance(value, dict):
value = BaseDict(value, None, '%s.%s' % (self._name, key))
super(BaseList, self).__setitem__(key, value)
value._instance = self._instance
elif not isinstance(value, BaseList) and isinstance(value, list):
value = BaseList(value, None, '%s.%s' % (self._name, key))
super(BaseList, self).__setitem__(key, value)
value._instance = self._instance
return value
def __iter__(self):
for i in six.moves.range(self.__len__()):
yield self[i]
def __setitem__(self, key, value, *args, **kwargs):
if isinstance(key, slice):
self._mark_as_changed()
else:
self._mark_as_changed(key)
return super(BaseList, self).__setitem__(key, value)
def __delitem__(self, key, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__delitem__(key)
def __setslice__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__setslice__(*args, **kwargs)
def __delslice__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__delslice__(*args, **kwargs)
def __getstate__(self):
self.instance = None
self._dereferenced = False
return self
def __setstate__(self, state):
self = state
return self
def __iadd__(self, other):
self._mark_as_changed()
return super(BaseList, self).__iadd__(other)
def __imul__(self, other):
self._mark_as_changed()
return super(BaseList, self).__imul__(other)
def append(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).append(*args, **kwargs)
def extend(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).extend(*args, **kwargs)
def insert(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).insert(*args, **kwargs)
def pop(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).pop(*args, **kwargs)
def remove(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).remove(*args, **kwargs)
def reverse(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).reverse()
def sort(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).sort(*args, **kwargs)
def _mark_as_changed(self, key=None):
if hasattr(self._instance, '_mark_as_changed'):
if key:
self._instance._mark_as_changed(
'%s.%s' % (self._name, key % len(self))
)
else:
self._instance._mark_as_changed(self._name)
class EmbeddedDocumentList(BaseList):
@classmethod
def __match_all(cls, embedded_doc, kwargs):
"""Return True if a given embedded doc matches all the filter
kwargs, and False otherwise.
"""
for key, expected_value in kwargs.items():
doc_val = getattr(embedded_doc, key)
if doc_val != expected_value and six.text_type(doc_val) != expected_value:
return False
return True
@classmethod
def __only_matches(cls, embedded_docs, kwargs):
"""Return embedded docs that match the filter kwargs."""
if not kwargs:
return embedded_docs
return [doc for doc in embedded_docs if cls.__match_all(doc, kwargs)]
def __init__(self, list_items, instance, name):
super(EmbeddedDocumentList, self).__init__(list_items, instance, name)
self._instance = instance
def filter(self, **kwargs):
"""
Filters the list by only including embedded documents with the
given keyword arguments.
:param kwargs: The keyword arguments corresponding to the fields to
filter on. *Multiple arguments are treated as if they are ANDed
together.*
:return: A new ``EmbeddedDocumentList`` containing the matching
embedded documents.
Raises ``AttributeError`` if a given keyword is not a valid field for
the embedded document class.
"""
values = self.__only_matches(self, kwargs)
return EmbeddedDocumentList(values, self._instance, self._name)
def exclude(self, **kwargs):
"""
Filters the list by excluding embedded documents with the given
keyword arguments.
:param kwargs: The keyword arguments corresponding to the fields to
exclude on. *Multiple arguments are treated as if they are ANDed
together.*
:return: A new ``EmbeddedDocumentList`` containing the non-matching
embedded documents.
Raises ``AttributeError`` if a given keyword is not a valid field for
the embedded document class.
"""
exclude = self.__only_matches(self, kwargs)
values = [item for item in self if item not in exclude]
return EmbeddedDocumentList(values, self._instance, self._name)
def count(self):
"""
The number of embedded documents in the list.
:return: The length of the list, equivalent to the result of ``len()``.
"""
return len(self)
def get(self, **kwargs):
"""
Retrieves an embedded document determined by the given keyword
arguments.
:param kwargs: The keyword arguments corresponding to the fields to
search on. *Multiple arguments are treated as if they are ANDed
together.*
:return: The embedded document matched by the given keyword arguments.
Raises ``DoesNotExist`` if the arguments used to query an embedded
document returns no results. ``MultipleObjectsReturned`` if more
than one result is returned.
"""
values = self.__only_matches(self, kwargs)
if len(values) == 0:
raise DoesNotExist(
'%s matching query does not exist.' % self._name
)
elif len(values) > 1:
raise MultipleObjectsReturned(
'%d items returned, instead of 1' % len(values)
)
return values[0]
def first(self):
"""Return the first embedded document in the list, or ``None``
if empty.
"""
if len(self) > 0:
return self[0]
def create(self, **values):
"""
Creates a new embedded document and appends it to this list.
.. note::
The embedded document changes are not automatically saved
to the database after calling this method.
:param values: A dictionary of values for the embedded document.
:return: The new embedded document instance.
"""
name = self._name
EmbeddedClass = self._instance._fields[name].field.document_type_obj
self._instance[self._name].append(EmbeddedClass(**values))
return self._instance[self._name][-1]
def save(self, *args, **kwargs):
"""
Saves the ancestor document.
:param args: Arguments passed up to the ancestor Document's save
method.
:param kwargs: Keyword arguments passed up to the ancestor Document's
save method.
"""
self._instance.save(*args, **kwargs)
def delete(self):
"""
Removes the embedded documents from this list.
.. note::
The embedded document changes are not automatically saved
to the database after calling this method.
:return: The number of entries deleted.
"""
values = list(self)
for item in values:
self._instance[self._name].remove(item)
return len(values)
def update(self, **update):
"""
Updates the embedded documents with the given update values.
.. note::
The embedded document changes are not automatically saved
to the database after calling this method.
:param update: A dictionary of update values to apply to each
embedded document.
:return: The number of entries updated.
"""
if len(update) == 0:
return 0
values = list(self)
for item in values:
for k, v in update.items():
setattr(item, k, v)
return len(values)
class StrictDict(object):
__slots__ = ()
_special_fields = set(['get', 'pop', 'iteritems', 'items', 'keys', 'create'])
_classes = {}
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __getitem__(self, key):
key = '_reserved_' + key if key in self._special_fields else key
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key)
def __setitem__(self, key, value):
key = '_reserved_' + key if key in self._special_fields else key
return setattr(self, key, value)
def __contains__(self, key):
return hasattr(self, key)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def pop(self, key, default=None):
v = self.get(key, default)
try:
delattr(self, key)
except AttributeError:
pass
return v
def iteritems(self):
for key in self:
yield key, self[key]
def items(self):
return [(k, self[k]) for k in iter(self)]
def iterkeys(self):
return iter(self)
def keys(self):
return list(iter(self))
def __iter__(self):
return (key for key in self.__slots__ if hasattr(self, key))
def __len__(self):
return len(list(self.iteritems()))
def __eq__(self, other):
return self.items() == other.items()
def __ne__(self, other):
return self.items() != other.items()
@classmethod
def create(cls, allowed_keys):
allowed_keys_tuple = tuple(('_reserved_' + k if k in cls._special_fields else k) for k in allowed_keys)
allowed_keys = frozenset(allowed_keys_tuple)
if allowed_keys not in cls._classes:
class SpecificStrictDict(cls):
__slots__ = allowed_keys_tuple
def __repr__(self):
return '{%s}' % ', '.join('"{0!s}": {1!r}'.format(k, v) for k, v in self.items())
cls._classes[allowed_keys] = SpecificStrictDict
return cls._classes[allowed_keys]
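# Hedged usage sketch for StrictDict.create above: it returns (and caches) a
# __slots__-restricted, dict-like class limited to the allowed keys. The names
# "Point", "x" and "y" below are illustrative only.
def _strict_dict_example():
    Point = StrictDict.create(('x', 'y'))
    p = Point(x=1, y=2)
    assert p['x'] == 1 and p.get('z') is None
    assert set(p.keys()) == set(['x', 'y'])
    return p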
|
|
#!/usr/bin/env python3
import os
import sys
import logging
import argparse
import platform
import subprocess
import glob
import hashlib
import traceback
os.environ["PYTHONUNBUFFERED"] = "y"
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)
from scripts.lib.zulip_tools import run, subprocess_text_output, OKBLUE, ENDC, WARNING, \
get_dev_uuid_var_path, FAIL, parse_lsb_release, file_or_package_hash_updated
from scripts.lib.setup_venv import (
setup_virtualenv, VENV_DEPENDENCIES, REDHAT_VENV_DEPENDENCIES,
THUMBOR_VENV_DEPENDENCIES, YUM_THUMBOR_VENV_DEPENDENCIES,
FEDORA_VENV_DEPENDENCIES
)
from scripts.lib.node_cache import setup_node_modules, NODE_MODULES_CACHE_PATH
from version import PROVISION_VERSION
if False:
# See https://zulip.readthedocs.io/en/latest/testing/mypy.html#mypy-in-production-scripts
from typing import Any, List
from tools.setup.generate_zulip_bots_static_files import generate_zulip_bots_static_files
SUPPORTED_PLATFORMS = {
"Ubuntu": [
"trusty",
"xenial",
"bionic",
],
"Debian": [
"stretch",
],
"CentOS": [
"centos7",
],
"Fedora": [
"fedora29",
],
"RedHat": [
"rhel7",
]
}
VENV_PATH = "/srv/zulip-py3-venv"
VAR_DIR_PATH = os.path.join(ZULIP_PATH, 'var')
LOG_DIR_PATH = os.path.join(VAR_DIR_PATH, 'log')
UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'uploads')
TEST_UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'test_uploads')
COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'coverage')
NODE_TEST_COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'node-coverage')
is_travis = 'TRAVIS' in os.environ
is_circleci = 'CIRCLECI' in os.environ
# TODO: De-duplicate this with emoji_dump.py
EMOJI_CACHE_PATH = "/srv/zulip-emoji-cache"
if is_travis:
# In Travis CI, we don't have root access
EMOJI_CACHE_PATH = "/home/travis/zulip-emoji-cache"
if not os.path.exists(os.path.join(ZULIP_PATH, ".git")):
print(FAIL + "Error: No Zulip git repository present!" + ENDC)
print("To setup the Zulip development environment, you should clone the code")
print("from GitHub, rather than using a Zulip production release tarball.")
sys.exit(1)
# Check the RAM on the user's system, and throw an error if <1.5GB.
# This avoids users getting segfaults running `pip install` that are
# generally more annoying to debug.
with open("/proc/meminfo") as meminfo:
ram_size = meminfo.readlines()[0].strip().split(" ")[-2]
ram_gb = float(ram_size) / 1024.0 / 1024.0
if ram_gb < 1.5:
print("You have insufficient RAM (%s GB) to run the Zulip development environment." % (
round(ram_gb, 2),))
print("We recommend at least 2 GB of RAM, and require at least 1.5 GB.")
sys.exit(1)
try:
UUID_VAR_PATH = get_dev_uuid_var_path(create_if_missing=True)
os.makedirs(UUID_VAR_PATH, exist_ok=True)
if os.path.exists(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink')):
os.remove(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink'))
os.symlink(
os.path.join(ZULIP_PATH, 'README.md'),
os.path.join(VAR_DIR_PATH, 'zulip-test-symlink')
)
os.remove(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink'))
except OSError:
print(FAIL + "Error: Unable to create symlinks."
"Make sure you have permission to create symbolic links." + ENDC)
print("See this page for more information:")
print(" https://zulip.readthedocs.io/en/latest/development/setup-vagrant.html#os-symlink-error")
sys.exit(1)
if platform.architecture()[0] == '64bit':
arch = 'amd64'
elif platform.architecture()[0] == '32bit':
arch = "i386"
else:
logging.critical("Only x86 is supported;"
"ping [email protected] if you want another architecture.")
sys.exit(1)
# Ideally we wouldn't need to install a dependency here, before we
# know the codename.
is_rhel_based = os.path.exists("/etc/redhat-release")
if (not is_rhel_based) and (not os.path.exists("/usr/bin/lsb_release")):
subprocess.check_call(["sudo", "apt-get", "install", "-y", "lsb-release"])
distro_info = parse_lsb_release()
vendor = distro_info['DISTRIB_ID']
codename = distro_info['DISTRIB_CODENAME']
family = distro_info['DISTRIB_FAMILY']
if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
logging.critical("Unsupported platform: {} {}".format(vendor, codename))
sys.exit(1)
POSTGRES_VERSION_MAP = {
"stretch": "9.6",
"trusty": "9.3",
"xenial": "9.5",
"bionic": "10",
"centos7": "10",
"fedora29": "10",
"rhel7": "10",
}
POSTGRES_VERSION = POSTGRES_VERSION_MAP[codename]
COMMON_DEPENDENCIES = [
"closure-compiler",
"memcached",
"rabbitmq-server",
"supervisor",
"git",
"wget",
"ca-certificates", # Explicit dependency in case e.g. wget is already installed
"puppet", # Used by lint (`puppet parser validate`)
"gettext", # Used by makemessages i18n
"curl", # Used for fetching PhantomJS as wget occasionally fails on redirects
"moreutils", # Used for sponge command
]
UBUNTU_COMMON_APT_DEPENDENCIES = COMMON_DEPENDENCIES + [
"redis-server",
"hunspell-en-us",
"yui-compressor",
"puppet-lint",
"netcat", # Used for flushing memcached
"libfontconfig1", # Required by phantomjs
] + VENV_DEPENDENCIES + THUMBOR_VENV_DEPENDENCIES
COMMON_YUM_DEPENDENCIES = COMMON_DEPENDENCIES + [
"redis",
"hunspell-en-US",
"yuicompressor",
"rubygem-puppet-lint",
"nmap-ncat",
"fontconfig", # phantomjs dependencies from here until libstdc++
"freetype",
"freetype-devel",
"fontconfig-devel",
"libstdc++"
] + YUM_THUMBOR_VENV_DEPENDENCIES
if vendor in ["Ubuntu", "Debian"]:
SYSTEM_DEPENDENCIES = UBUNTU_COMMON_APT_DEPENDENCIES + [
pkg.format(POSTGRES_VERSION) for pkg in [
"postgresql-{0}",
"postgresql-{0}-tsearch-extras",
"postgresql-{0}-pgroonga",
]
]
elif vendor in ["CentOS", "RedHat"]:
SYSTEM_DEPENDENCIES = COMMON_YUM_DEPENDENCIES + [
pkg.format(POSTGRES_VERSION) for pkg in [
"postgresql{0}-server",
"postgresql{0}",
"postgresql{0}-devel",
"postgresql{0}-pgroonga",
]
] + REDHAT_VENV_DEPENDENCIES
elif vendor == "Fedora":
SYSTEM_DEPENDENCIES = COMMON_YUM_DEPENDENCIES + [
pkg.format(POSTGRES_VERSION) for pkg in [
"postgresql{0}-server",
"postgresql{0}",
"postgresql{0}-devel",
]
] + FEDORA_VENV_DEPENDENCIES
if family == 'redhat':
TSEARCH_STOPWORDS_PATH = "/usr/pgsql-%s/share/tsearch_data/" % (POSTGRES_VERSION,)
else:
TSEARCH_STOPWORDS_PATH = "/usr/share/postgresql/%s/tsearch_data/" % (POSTGRES_VERSION,)
REPO_STOPWORDS_PATH = os.path.join(
ZULIP_PATH,
"puppet",
"zulip",
"files",
"postgresql",
"zulip_english.stop",
)
user_id = os.getuid()
def setup_shell_profile(shell_profile):
# type: (str) -> None
shell_profile_path = os.path.expanduser(shell_profile)
def write_command(command):
# type: (str) -> None
if os.path.exists(shell_profile_path):
with open(shell_profile_path, 'r') as shell_profile_file:
lines = [line.strip() for line in shell_profile_file.readlines()]
if command not in lines:
with open(shell_profile_path, 'a+') as shell_profile_file:
shell_profile_file.writelines(command + '\n')
else:
with open(shell_profile_path, 'w') as shell_profile_file:
shell_profile_file.writelines(command + '\n')
source_activate_command = "source " + os.path.join(VENV_PATH, "bin", "activate")
write_command(source_activate_command)
write_command('cd /srv/zulip')
def install_system_deps(retry=False):
# type: (bool) -> None
# By doing list -> set -> list conversion, we remove duplicates.
deps_to_install = list(set(SYSTEM_DEPENDENCIES))
if family == 'redhat':
install_yum_deps(deps_to_install, retry=retry)
return
if vendor in ["Debian", "Ubuntu"]:
install_apt_deps(deps_to_install, retry=retry)
return
raise AssertionError("Invalid vendor")
def install_apt_deps(deps_to_install, retry=False):
# type: (List[str], bool) -> None
if retry:
print(WARNING + "`apt-get -y install` failed while installing dependencies; retrying..." + ENDC)
# Since a common failure mode is for the caching in
# `setup-apt-repo` to optimize the fast code path to skip
# running `apt-get update` when the target apt repository
# is out of date, we run it explicitly here so that we
# recover automatically.
run(['sudo', 'apt-get', 'update'])
# setup-apt-repo does an `apt-get update`
run(["sudo", "./scripts/lib/setup-apt-repo"])
run(["sudo", "apt-get", "-y", "install", "--no-install-recommends"] + deps_to_install)
def install_yum_deps(deps_to_install, retry=False):
# type: (List[str], bool) -> None
print(WARNING + "RedHat support is still experimental.")
run(["sudo", "./scripts/lib/setup-yum-repo"])
# Hack specific to unregistered RHEL system. The moreutils
# package requires a perl module package, which isn't available in
# the unregistered RHEL repositories.
#
# Error: Package: moreutils-0.49-2.el7.x86_64 (epel)
# Requires: perl(IPC::Run)
yum_extra_flags = [] # type: List[str]
if vendor == 'RedHat':
exitcode, subs_status = subprocess.getstatusoutput("sudo subscription-manager status")
if exitcode == 1:
# TODO: this might be overkill, since `subscription-manager` is already
# called in setup-yum-repo
if 'Status' in subs_status:
# The output is well-formed
yum_extra_flags = ["--skip-broken"]
else:
print("Unrecognized output. `subscription-manager` might not be available")
run(["sudo", "yum", "install", "-y"] + yum_extra_flags + deps_to_install)
if vendor in ["CentOS", "RedHat"]:
# This is how pip3 gets installed to /usr/bin in CentOS/RHEL
# for python35 and later.
run(["sudo", "python36", "-m", "ensurepip"])
# `python36` is not aliased to `python3` by default
run(["sudo", "ln", "-nsf", "/usr/bin/python36", "/usr/bin/python3"])
postgres_dir = 'pgsql-%s' % (POSTGRES_VERSION,)
for cmd in ['pg_config', 'pg_isready', 'psql']:
# Our tooling expects these postgres scripts to be at
# well-known paths. There's an argument for eventually
# making our tooling auto-detect, but this is simpler.
run(["sudo", "ln", "-nsf", "/usr/%s/bin/%s" % (postgres_dir, cmd),
"/usr/bin/%s" % (cmd,)])
# Compile tsearch-extras from scratch, since we maintain the
# package and haven't built an RPM package for it.
run(["sudo", "./scripts/lib/build-tsearch-extras"])
if vendor == "Fedora":
# Compile PGroonga from scratch, since pgroonga upstream
# doesn't provide Fedora packages.
run(["sudo", "./scripts/lib/build-pgroonga"])
# From here, we do the first-time setup/initialization for the postgres database.
pg_datadir = "/var/lib/pgsql/%s/data" % (POSTGRES_VERSION,)
pg_hba_conf = os.path.join(pg_datadir, "pg_hba.conf")
# We can't just check if the file exists with os.path, since the
# current user likely doesn't have permission to read the
# pg_datadir directory.
if subprocess.call(["sudo", "test", "-e", pg_hba_conf]) == 0:
# Skip setup if it has been applied previously
return
run(["sudo", "-H", "/usr/%s/bin/postgresql-%s-setup" % (postgres_dir, POSTGRES_VERSION), "initdb"])
# Use vendored pg_hba.conf, which enables password authentication.
run(["sudo", "cp", "-a", "puppet/zulip/files/postgresql/centos_pg_hba.conf", pg_hba_conf])
# Later steps will ensure postgres is started
def main(options):
# type: (Any) -> int
# yarn and management commands expect to be run from the root of the
# project.
os.chdir(ZULIP_PATH)
# hash the apt dependencies
sha_sum = hashlib.sha1()
for apt_dependency in SYSTEM_DEPENDENCIES:
sha_sum.update(apt_dependency.encode('utf8'))
if vendor in ["Ubuntu", "Debian"]:
sha_sum.update(open('scripts/lib/setup-apt-repo', 'rb').read())
else:
# hash the content of setup-yum-repo and build-*
sha_sum.update(open('scripts/lib/setup-yum-repo', 'rb').read())
build_paths = glob.glob("scripts/lib/build-*")
for bp in build_paths:
sha_sum.update(open(bp, 'rb').read())
new_apt_dependencies_hash = sha_sum.hexdigest()
last_apt_dependencies_hash = None
apt_hash_file_path = os.path.join(UUID_VAR_PATH, "apt_dependencies_hash")
with open(apt_hash_file_path, 'a+') as hash_file:
hash_file.seek(0)
last_apt_dependencies_hash = hash_file.read()
if (new_apt_dependencies_hash != last_apt_dependencies_hash):
try:
install_system_deps()
except subprocess.CalledProcessError:
# Might be a failure due to network connection issues. Retrying...
install_system_deps(retry=True)
with open(apt_hash_file_path, 'w') as hash_file:
hash_file.write(new_apt_dependencies_hash)
else:
print("No changes to apt dependencies, so skipping apt operations.")
# Here we install node.
proxy_env = [
"env",
"http_proxy=" + os.environ.get("http_proxy", ""),
"https_proxy=" + os.environ.get("https_proxy", ""),
"no_proxy=" + os.environ.get("no_proxy", ""),
]
run(["sudo", "-H"] + proxy_env + ["scripts/lib/install-node"])
# This is a wrapper around `yarn`, which we run last since
# it can often fail due to network issues beyond our control.
try:
# Hack: We remove `node_modules` as root to work around an
# issue with the symlinks being improperly owned by root.
if os.path.islink("node_modules"):
run(["sudo", "rm", "-f", "node_modules"])
run(["sudo", "mkdir", "-p", NODE_MODULES_CACHE_PATH])
run(["sudo", "chown", "%s:%s" % (user_id, user_id), NODE_MODULES_CACHE_PATH])
setup_node_modules(prefer_offline=True)
except subprocess.CalledProcessError:
print(WARNING + "`yarn install` failed; retrying..." + ENDC)
setup_node_modules()
# Install shellcheck.
run(["sudo", "scripts/lib/install-shellcheck"])
# Import tools/setup_venv.py instead of running it so that we get an
# activated virtualenv for the rest of the provisioning process.
from tools.setup import setup_venvs
setup_venvs.main()
setup_shell_profile('~/.bash_profile')
setup_shell_profile('~/.zprofile')
run(["sudo", "cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])
# create log directory `zulip/var/log`
os.makedirs(LOG_DIR_PATH, exist_ok=True)
# create upload directory `var/uploads`
os.makedirs(UPLOAD_DIR_PATH, exist_ok=True)
# create test upload directory `var/test_uploads`
os.makedirs(TEST_UPLOAD_DIR_PATH, exist_ok=True)
# create coverage directory `var/coverage`
os.makedirs(COVERAGE_DIR_PATH, exist_ok=True)
# create linecoverage directory `var/node-coverage`
os.makedirs(NODE_TEST_COVERAGE_DIR_PATH, exist_ok=True)
# The `build_emoji` script requires `emoji-datasource` package
# which we install via npm; thus this step is after installing npm
# packages.
if not os.path.isdir(EMOJI_CACHE_PATH):
run(["sudo", "mkdir", EMOJI_CACHE_PATH])
run(["sudo", "chown", "%s:%s" % (user_id, user_id), EMOJI_CACHE_PATH])
run(["tools/setup/emoji/build_emoji"])
# copy over static files from the zulip_bots package
generate_zulip_bots_static_files()
webfont_paths = ["tools/setup/generate-custom-icon-webfont", "static/icons/fonts/template.hbs"]
webfont_paths += glob.glob('static/assets/icons/*')
if file_or_package_hash_updated(webfont_paths, "webfont_files_hash", options.is_force):
run(["tools/setup/generate-custom-icon-webfont"])
else:
print("No need to run `tools/setup/generate-custom-icon-webfont`.")
build_pygments_data_paths = ["tools/setup/build_pygments_data", "tools/setup/lang.json"]
from pygments import __version__ as pygments_version
if file_or_package_hash_updated(build_pygments_data_paths, "build_pygments_data_hash", options.is_force,
[pygments_version]):
run(["tools/setup/build_pygments_data"])
else:
print("No need to run `tools/setup/build_pygments_data`.")
run(["scripts/setup/generate_secrets.py", "--development"])
update_authors_json_paths = ["tools/update-authors-json", "zerver/tests/fixtures/authors.json"]
if file_or_package_hash_updated(update_authors_json_paths, "update_authors_json_hash", options.is_force):
run(["tools/update-authors-json", "--use-fixture"])
else:
print("No need to run `tools/update-authors-json`.")
email_source_paths = ["tools/inline-email-css", "templates/zerver/emails/email.css"]
email_source_paths += glob.glob('templates/zerver/emails/*.source.html')
if file_or_package_hash_updated(email_source_paths, "last_email_source_files_hash", options.is_force):
run(["tools/inline-email-css"])
else:
print("No need to run `tools/inline-email-css`.")
if is_circleci or (is_travis and not options.is_production_travis):
run(["sudo", "service", "rabbitmq-server", "restart"])
run(["sudo", "service", "redis-server", "restart"])
run(["sudo", "service", "memcached", "restart"])
run(["sudo", "service", "postgresql", "restart"])
elif family == 'redhat':
for service in ["postgresql-%s" % (POSTGRES_VERSION,), "rabbitmq-server", "memcached", "redis"]:
run(["sudo", "-H", "systemctl", "enable", service])
run(["sudo", "-H", "systemctl", "start", service])
elif options.is_docker:
run(["sudo", "service", "rabbitmq-server", "restart"])
run(["sudo", "pg_dropcluster", "--stop", POSTGRES_VERSION, "main"])
run(["sudo", "pg_createcluster", "-e", "utf8", "--start", POSTGRES_VERSION, "main"])
run(["sudo", "service", "redis-server", "restart"])
run(["sudo", "service", "memcached", "restart"])
if not options.is_production_travis:
# The following block is skipped for the production Travis
# suite, because that suite doesn't make use of these elements
# of the development environment (it just uses the development
# environment to build a release tarball).
# Need to set up Django before using template_database_status
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
import django
django.setup()
from zerver.lib.test_fixtures import template_database_status, run_db_migrations
try:
from zerver.lib.queue import SimpleQueueClient
SimpleQueueClient()
rabbitmq_is_configured = True
except Exception:
rabbitmq_is_configured = False
if options.is_force or not rabbitmq_is_configured:
run(["scripts/setup/configure-rabbitmq"])
else:
print("RabbitMQ is already configured.")
migration_status_path = os.path.join(UUID_VAR_PATH, "migration_status_dev")
dev_template_db_status = template_database_status(
migration_status=migration_status_path,
settings="zproject.settings",
database_name="zulip",
)
if options.is_force or dev_template_db_status == 'needs_rebuild':
run(["tools/setup/postgres-init-dev-db"])
run(["tools/do-destroy-rebuild-database"])
elif dev_template_db_status == 'run_migrations':
run_db_migrations('dev')
elif dev_template_db_status == 'current':
print("No need to regenerate the dev DB.")
test_template_db_status = template_database_status()
if options.is_force or test_template_db_status == 'needs_rebuild':
run(["tools/setup/postgres-init-test-db"])
run(["tools/do-destroy-rebuild-test-database"])
elif test_template_db_status == 'run_migrations':
run_db_migrations('test')
elif test_template_db_status == 'current':
print("No need to regenerate the test DB.")
# Consider updating generated translations data: both `.mo`
# files and `language-options.json`.
paths = ['zerver/management/commands/compilemessages.py']
paths += glob.glob('static/locale/*/LC_MESSAGES/*.po')
paths += glob.glob('static/locale/*/translations.json')
if file_or_package_hash_updated(paths, "last_compilemessages_hash", options.is_force):
run(["./manage.py", "compilemessages"])
else:
print("No need to run `manage.py compilemessages`.")
run(["scripts/lib/clean-unused-caches"])
version_file = os.path.join(UUID_VAR_PATH, 'provision_version')
print('writing to %s\n' % (version_file,))
open(version_file, 'w').write(PROVISION_VERSION + '\n')
print()
print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
return 0
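# Standalone sketch of the "hash the dependency list, skip work when unchanged"
# pattern used in main() above. The helper below is illustrative only and is not
# called by the provision script.
def _dependencies_changed(deps, previous_hexdigest):
    sha = hashlib.sha1()
    for dep in deps:
        sha.update(dep.encode('utf8'))
    return sha.hexdigest() != previous_hexdigest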
if __name__ == "__main__":
description = ("Provision script to install Zulip")
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--force', action='store_true', dest='is_force',
default=False,
help="Ignore all provisioning optimizations.")
parser.add_argument('--production-travis', action='store_true',
dest='is_production_travis',
default=False,
help="Provision for Travis with production settings.")
parser.add_argument('--docker', action='store_true',
dest='is_docker',
default=False,
help="Provision for Docker.")
options = parser.parse_args()
sys.exit(main(options))
|
|
#!/usr/bin/env python
#
# Generated by generateDS.py.
#
import sys
import out2_sup as supermod
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
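# Hedged usage sketch (not generated code): parsexml_ accepts any source that the
# selected ElementTree implementation's parse() accepts, e.g. a filename or a
# file-like object. The helper name below is illustrative only.
def _parsexml_sketch():
    from io import BytesIO
    doc = parsexml_(BytesIO(b"<people/>"))
    return doc.getroot().tag  # -> 'people'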
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Data representation classes
#
class peopleSub(supermod.people):
def __init__(self, comments=None, person=None, programmer=None, python_programmer=None, java_programmer=None):
super(peopleSub, self).__init__(comments, person, programmer, python_programmer, java_programmer, )
supermod.people.subclass = peopleSub
# end class peopleSub
class commentsSub(supermod.comments):
def __init__(self, emp=None, valueOf_=None, mixedclass_=None, content_=None):
super(commentsSub, self).__init__(emp, valueOf_, mixedclass_, content_, )
supermod.comments.subclass = commentsSub
# end class commentsSub
class personSub(supermod.person):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, extensiontype_=None):
super(personSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, extensiontype_, )
supermod.person.subclass = personSub
# end class personSub
class programmerSub(supermod.programmer):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, extensiontype_=None):
super(programmerSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, extensiontype_, )
supermod.programmer.subclass = programmerSub
# end class programmerSub
class paramSub(supermod.param):
def __init__(self, semantic=None, name=None, flow=None, sid=None, type_=None, id=None, valueOf_=None):
super(paramSub, self).__init__(semantic, name, flow, sid, type_, id, valueOf_, )
supermod.param.subclass = paramSub
# end class paramSub
class python_programmerSub(supermod.python_programmer):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, nick_name=None, favorite_editor=None):
super(python_programmerSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, nick_name, favorite_editor, )
supermod.python_programmer.subclass = python_programmerSub
# end class python_programmerSub
class java_programmerSub(supermod.java_programmer):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, status=None, nick_name=None, favorite_editor=None):
super(java_programmerSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, status, nick_name, favorite_editor, )
supermod.java_programmer.subclass = java_programmerSub
# end class java_programmerSub
class agentSub(supermod.agent):
def __init__(self, firstname=None, lastname=None, priority=None, info=None):
super(agentSub, self).__init__(firstname, lastname, priority, info, )
supermod.agent.subclass = agentSub
# end class agentSub
class special_agentSub(supermod.special_agent):
def __init__(self, firstname=None, lastname=None, priority=None, info=None):
super(special_agentSub, self).__init__(firstname, lastname, priority, info, )
supermod.special_agent.subclass = special_agentSub
# end class special_agentSub
class boosterSub(supermod.booster):
def __init__(self, firstname=None, lastname=None, other_name=None, classxx=None, other_value=None, type_=None, client_handler=None):
super(boosterSub, self).__init__(firstname, lastname, other_name, classxx, other_value, type_, client_handler, )
supermod.booster.subclass = boosterSub
# end class boosterSub
class infoSub(supermod.info):
def __init__(self, rating=None, type_=None, name=None):
super(infoSub, self).__init__(rating, type_, name, )
supermod.info.subclass = infoSub
# end class infoSub
class client_handlerTypeSub(supermod.client_handlerType):
def __init__(self, fullname=None, refid=None):
super(client_handlerTypeSub, self).__init__(fullname, refid, )
supermod.client_handlerType.subclass = client_handlerTypeSub
# end class client_handlerTypeSub
def get_root_tag(node):
tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
rootClass = None
if hasattr(supermod, tag):
rootClass = getattr(supermod, tag)
return tag, rootClass
def parse(inFilename):
doc = parsexml_(inFilename)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = supermod.people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='')
doc = None
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = supermod.people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseLiteral(inFilename):
doc = parsexml_(inFilename)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = supermod.people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from out2_sup import *\n\n')
sys.stdout.write('import out2_sup as model_\n\n')
sys.stdout.write('rootObj = model_.people(\n')
rootObj.exportLiteral(sys.stdout, 0, name_="people")
sys.stdout.write(')\n')
return rootObj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def main():
args = sys.argv[1:]
if len(args) != 1:
usage()
infilename = args[0]
root = parse(infilename)
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
|
|
"""Google Cloud Hierarchical Firewall Generator.
Hierarchical Firewalls (HF) are represented in a SecurityPolicy GCP resource.
"""
import copy
import re
from typing import Dict, Any
from absl import logging
from capirca.lib import gcp
from capirca.lib import nacaddr
class ExceededCostError(gcp.Error):
"""Raised when the total cost of a policy is above the maximum."""
class DifferentPolicyNameError(gcp.Error):
"""Raised when headers in the same policy have a different policy name."""
class ApiVersionSyntaxMap:
"""Defines the syntax changes between different API versions.
http://cloud/compute/docs/reference/rest/v1/firewallPolicies/addRule
http://cloud/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule
"""
SYNTAX_MAP = {
'beta': {
'display_name': 'displayName',
'dest_ip_range': 'destIpRanges',
'src_ip_range': 'srcIpRanges',
'layer_4_config': 'layer4Configs'
},
'ga': {
'display_name': 'shortName',
'dest_ip_range': 'destIpRanges',
'src_ip_range': 'srcIpRanges',
'layer_4_config': 'layer4Configs'
}
}
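# A hedged, illustrative helper (not part of capirca) showing how the syntax map
# above is consulted when rendering rules for a given API version; for example,
# ('ga', 'display_name') resolves to 'shortName' while ('beta', 'display_name')
# resolves to 'displayName'.
def _field_name_sketch(api_version, field):
    return ApiVersionSyntaxMap.SYNTAX_MAP[api_version][field]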
class Term(gcp.Term):
"""Used to create an individual term."""
ACTION_MAP = {'accept': 'allow', 'next': 'goto_next'}
_MAX_TERM_COMMENT_LENGTH = 64
_TARGET_RESOURCE_FORMAT = 'https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}'
_TERM_ADDRESS_LIMIT = 256
_TERM_TARGET_RESOURCES_LIMIT = 256
_TERM_DESTINATION_PORTS_LIMIT = 256
def __init__(self,
term,
address_family='inet',
policy_inet_version='inet',
api_version='beta'):
super().__init__(term)
self.address_family = address_family
self.term = term
self.skip = False
self._ValidateTerm()
self.api_version = api_version
# This is to handle mixed, where the policy_inet_version is mixed,
# but the term inet version is either inet/inet6.
# This is only useful for term name and priority.
self.policy_inet_version = policy_inet_version
def _ValidateTerm(self):
if self.term.destination_tag or self.term.source_tag:
raise gcp.TermError('Hierarchical Firewall does not support tags')
if len(self.term.target_resources) > self._TERM_TARGET_RESOURCES_LIMIT:
raise gcp.TermError(
'Term: %s target_resources field contains %s resources. It should not contain more than "%s".'
% (self.term.name, str(len(
self.term.target_resources)), self._TERM_TARGET_RESOURCES_LIMIT))
for proj, vpc in self.term.target_resources:
if not gcp.IsProjectIDValid(proj):
raise gcp.TermError(
'Project ID "%s" must be 6 to 30 lowercase letters, digits, or hyphens.'
' It must start with a letter. Trailing hyphens are prohibited.' %
proj)
if not gcp.IsVPCNameValid(vpc):
raise gcp.TermError('VPC name "%s" must start with a lowercase letter '
'followed by up to 62 lowercase letters, numbers, '
'or hyphens, and cannot end with a hyphen.' % vpc)
if self.term.source_port:
raise gcp.TermError('Hierarchical firewall does not support source port '
'restrictions.')
if self.term.option:
raise gcp.TermError('Hierarchical firewall does not support the '
'TCP_ESTABLISHED option.')
if len(self.term.destination_port) > self._TERM_DESTINATION_PORTS_LIMIT:
raise gcp.TermError(
'Term: %s destination_port field contains %s ports. It should not contain more than "%s".'
% (self.term.name, str(len(
self.term.destination_port)), self._TERM_DESTINATION_PORTS_LIMIT))
# Since policy_inet_version is used to handle 'mixed'.
# We should error out if the individual term's inet version (address_family)
# is anything other than inet/inet6, since this should never happen
# naturally. Something has gone horribly wrong if you encounter this error.
if self.address_family == 'mixed':
raise gcp.TermError(
'Hierarchical firewall rule has incorrect inet_version for rule: %s' %
self.term.name)
def ConvertToDict(self, priority_index):
"""Converts term to dict representation of SecurityPolicy.Rule JSON format.
Takes all of the attributes associated with a term (match, action, etc) and
converts them into a dictionary which most closely represents
the SecurityPolicy.Rule JSON format.
Args:
priority_index: An integer priority value assigned to the term.
Returns:
A dict term.
"""
if self.skip:
return {}
rules = []
# Identify if this is inet6 processing for a term under a mixed policy.
mixed_policy_inet6_term = False
if self.policy_inet_version == 'mixed' and self.address_family == 'inet6':
mixed_policy_inet6_term = True
term_dict = {
'action': self.ACTION_MAP.get(self.term.action[0], self.term.action[0]),
'direction': self.term.direction,
'priority': priority_index
}
# Get the correct syntax for API versions.
src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[
self.api_version]['src_ip_range']
dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[
self.api_version]['dest_ip_range']
layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[
self.api_version]['layer_4_config']
target_resources = []
for proj, vpc in self.term.target_resources:
target_resources.append(self._TARGET_RESOURCE_FORMAT.format(proj, vpc))
if target_resources: # Only set when non-empty.
term_dict['targetResources'] = target_resources
term_dict['enableLogging'] = self._GetLoggingSetting()
# This combo provides ability to identify the rule.
term_name = self.term.name
if mixed_policy_inet6_term:
term_name = gcp.GetIpv6TermName(term_name)
raw_description = term_name + ': ' + ' '.join(self.term.comment)
term_dict['description'] = gcp.TruncateString(raw_description,
self._MAX_TERM_COMMENT_LENGTH)
filtered_protocols = []
for proto in self.term.protocol:
# ICMP filtering by inet_version
# Since each term has inet_version, 'mixed' is correctly processed here.
if proto == 'icmp' and self.address_family == 'inet6':
logging.warning(
'WARNING: Term %s is being rendered for inet6, ICMP '
'protocol will not be rendered.', self.term.name)
continue
if proto == 'icmpv6' and self.address_family == 'inet':
logging.warning(
'WARNING: Term %s is being rendered for inet, ICMPv6 '
'protocol will not be rendered.', self.term.name)
continue
if proto == 'igmp' and self.address_family == 'inet6':
logging.warning(
'WARNING: Term %s is being rendered for inet6, IGMP '
'protocol will not be rendered.', self.term.name)
continue
filtered_protocols.append(proto)
# If there is no protocol left after ICMP/IGMP filtering, drop this term.
# But only do this for terms that originally had protocols.
# Otherwise you end up dropping the default-deny.
if self.term.protocol and not filtered_protocols:
return {}
protocols_and_ports = []
if not self.term.protocol:
# Empty protocol list means any protocol, but any protocol in HF is
# represented as "all"
protocols_and_ports = [{'ipProtocol': 'all'}]
else:
for proto in filtered_protocols:
# If the protocol name is not supported, use the protocol number.
if proto not in self._ALLOW_PROTO_NAME:
proto = str(self.PROTO_MAP[proto])
logging.info('INFO: Term %s is being rendered using protocol number',
self.term.name)
proto_ports = {'ipProtocol': proto}
if self.term.destination_port:
ports = self._GetPorts()
if ports: # Only set when non-empty.
proto_ports['ports'] = ports
protocols_and_ports.append(proto_ports)
if self.api_version == 'ga':
term_dict['match'] = {layer_4_config: protocols_and_ports}
else:
term_dict['match'] = {'config': {layer_4_config: protocols_and_ports}}
# match needs a field called versionedExpr with value FIREWALL
# See documentation:
# https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule
term_dict['match']['versionedExpr'] = 'FIREWALL'
ip_version = self.AF_MAP[self.address_family]
if ip_version == 4:
any_ip = [nacaddr.IP('0.0.0.0/0')]
else:
any_ip = [nacaddr.IPv6('::/0')]
if self.term.direction == 'EGRESS':
daddrs = self.term.GetAddressOfVersion('destination_address', ip_version)
# If the address got filtered out and is empty due to address family, we
# don't render the term. At this point of term processing, the direction
# has already been validated, so we can just log and return empty rule.
if self.term.destination_address and not daddrs:
logging.warning(
'WARNING: Term %s is not being rendered for %s, '
'because there are no addresses of that family.', self.term.name,
self.address_family)
return []
# This should only happen if there were no addresses set originally.
if not daddrs:
daddrs = any_ip
destination_address_chunks = [
daddrs[x:x + self._TERM_ADDRESS_LIMIT]
for x in range(0, len(daddrs), self._TERM_ADDRESS_LIMIT)
]
for daddr_chunk in destination_address_chunks:
rule = copy.deepcopy(term_dict)
if self.api_version == 'ga':
rule['match'][dest_ip_range] = [
daddr.with_prefixlen for daddr in daddr_chunk
]
else:
rule['match']['config'][dest_ip_range] = [
daddr.with_prefixlen for daddr in daddr_chunk
]
rule['priority'] = priority_index
rules.append(rule)
priority_index += 1
else:
saddrs = self.term.GetAddressOfVersion('source_address', ip_version)
# If the address got filtered out and is empty due to address family, we
# don't render the term. At this point of term processing, the direction
# has already been validated, so we can just log and return empty rule.
if self.term.source_address and not saddrs:
logging.warning(
'WARNING: Term %s is not being rendered for %s, '
'because there are no addresses of that family.', self.term.name,
self.address_family)
return []
# This should only happen if there were no addresses set originally.
if not saddrs:
saddrs = any_ip
source_address_chunks = [
saddrs[x:x + self._TERM_ADDRESS_LIMIT]
for x in range(0, len(saddrs), self._TERM_ADDRESS_LIMIT)
]
for saddr_chunk in source_address_chunks:
rule = copy.deepcopy(term_dict)
if self.api_version == 'ga':
rule['match'][src_ip_range] = [
saddr.with_prefixlen for saddr in saddr_chunk
]
else:
rule['match']['config'][src_ip_range] = [
saddr.with_prefixlen for saddr in saddr_chunk
]
rule['priority'] = priority_index
rules.append(rule)
priority_index += 1
return rules
def __str__(self):
return ''
class HierarchicalFirewall(gcp.GCP):
"""A GCP Hierarchical Firewall policy."""
SUFFIX = '.gcphf'
_ANY_IP = {
'inet': nacaddr.IP('0.0.0.0/0'),
'inet6': nacaddr.IP('::/0'),
}
_PLATFORM = 'gcp_hf'
_SUPPORTED_AF = frozenset(['inet', 'inet6', 'mixed'])
# Beta is the default API version. GA supports IPv6 (inet6/mixed).
_SUPPORTED_API_VERSION = frozenset(['beta', 'ga'])
_DEFAULT_MAXIMUM_COST = 100
def _BuildTokens(self):
"""Build supported tokens for platform.
Returns:
Tuple containing both supported tokens and sub tokens.
"""
supported_tokens, _ = super()._BuildTokens()
supported_tokens |= {
'destination_tag', 'expiration', 'source_tag', 'translated',
'target_resources', 'logging'
}
supported_tokens -= {
'destination_address_exclude', 'expiration', 'icmp_type',
'source_address_exclude', 'verbatim'
}
supported_sub_tokens = {'action': {'accept', 'deny', 'next'}}
return supported_tokens, supported_sub_tokens
def _TranslatePolicy(self, pol, exp_info):
"""Translates a Capirca policy into a HF-specific data structure.
Takes in a POL file, parses each term and populates the policy
dict. Each term in this list is a dictionary formatted according to
HF's rule API specification. Additionally, checks for its quota.
Args:
pol: A Policy() object representing a given POL file.
exp_info: An int that specifies number of weeks until policy expiry.
Raises:
ExceededCostError: Raised when the cost of a policy exceeds the default
maximum cost.
HeaderError: Raised when the header cannot be parsed or a header option is
invalid.
      DifferentPolicyNameError: Raised when a header policy name differs from
        others in the same policy.
"""
self.policies = []
policy = {
'rules': [],
'type': 'FIREWALL'
}
is_policy_modified = False
counter = 1
total_cost = 0
policies_max_cost = self._DEFAULT_MAXIMUM_COST
previous_max_cost = -1
for header, terms in pol.filters:
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)
is_policy_modified = True
# Get term direction if set.
direction = 'INGRESS'
for i in self._GOOD_DIRECTION:
if i in filter_options:
direction = i
filter_options.remove(i)
# Get the address family if set.
address_family = 'inet'
for i in self._SUPPORTED_AF:
if i in filter_options:
address_family = i
filter_options.remove(i)
# Get the compute API version if set.
api_version = 'beta'
for i in self._SUPPORTED_API_VERSION:
if i in filter_options:
api_version = i
filter_options.remove(i)
break
# Find the default maximum cost of a policy, an integer, if specified.
max_cost = self._DEFAULT_MAXIMUM_COST
for opt in filter_options:
try:
max_cost = int(opt)
filter_options.remove(opt)
break
except ValueError:
continue
if max_cost > 65536:
raise gcp.HeaderError(
'Default maximum cost cannot be higher than 65536')
if previous_max_cost != -1 and previous_max_cost != max_cost:
raise gcp.HeaderError(
'Maximum costs of each policy specified must be equal. '
'Unequal costs found: %d and %d' % (previous_max_cost, max_cost))
policies_max_cost = max_cost
previous_max_cost = max_cost
display_name = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['display_name']
# Get policy name and validate it to meet displayName requirements.
policy_name = header.FilterName(self._PLATFORM)
if not policy_name:
raise gcp.HeaderError(
'Policy name was not specified in header')
filter_options.remove(policy_name)
if len(policy_name) > 63:
raise gcp.HeaderError(
'Policy name "%s" is too long; the maximum number of characters '
'allowed is 63' % (policy_name))
if not bool(re.match('^[a-z]([-a-z0-9]*[a-z0-9])?$', policy_name)):
raise gcp.HeaderError(
'Invalid string for displayName, "%s"; the first character must be '
'a lowercase letter, and all following characters must be a dash, '
'lowercase letter, or digit, except the last character, which '
'cannot be a dash.' % (policy_name))
if display_name in policy and policy[display_name] != policy_name:
raise DifferentPolicyNameError(
'Policy names that are from the same policy are expected to be '
'equal, but %s is different to %s' %
(policy[display_name], policy_name))
policy[display_name] = policy_name
# If there are remaining options, they are unknown/unsupported options.
if filter_options:
raise gcp.HeaderError(
'Unsupported or unknown filter options %s in policy %s ' %
(str(filter_options), policy_name))
      # Handle mixed for each individual term as inet and inet6.
# inet/inet6 are treated the same.
term_address_families = []
if address_family == 'mixed':
term_address_families = ['inet', 'inet6']
else:
term_address_families = [address_family]
for term in terms:
if term.stateless_reply:
continue
if gcp.IsDefaultDeny(term):
if direction == 'EGRESS':
if address_family != 'mixed':
# Default deny also gets processed as part of terms processing.
# The name and priority get updated there.
term.destination_address = [self._ANY_IP[address_family]]
else:
term.destination_address = [
self._ANY_IP['inet'], self._ANY_IP['inet6']
]
else:
if address_family != 'mixed':
term.source_address = [self._ANY_IP[address_family]]
else:
term.source_address = [
self._ANY_IP['inet'], self._ANY_IP['inet6']
]
term.name = self.FixTermLength(term.name)
term.direction = direction
# Only generate the term if it's for the appropriate platform
if term.platform:
if self._PLATFORM not in term.platform:
continue
if term.platform_exclude:
if self._PLATFORM in term.platform_exclude:
continue
for term_af in term_address_families:
rules = Term(
term,
address_family=term_af,
policy_inet_version=address_family,
api_version=api_version).ConvertToDict(priority_index=counter)
if not rules:
continue
for dict_term in rules:
total_cost += GetRuleTupleCount(dict_term, api_version)
policy['rules'].append(dict_term)
counter += len(rules)
# We want to check the total policy cost, not just per policy.
if total_cost > policies_max_cost:
raise ExceededCostError(
'Policy cost (%d) for %s reached the '
'maximum (%d)' %
(total_cost, policy[display_name], policies_max_cost))
self.policies.append(policy)
    # Do not render empty rules if no policies have been evaluated.
if not is_policy_modified:
self.policies = []
if total_cost > 0:
logging.info('Policy %s quota cost: %d',
policy[display_name], total_cost)
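# Hedged illustration (not taken from this excerpt) of the header options parsed
# above, in capirca .pol syntax; the policy name and values are placeholders:
#
#   header {
#     comment:: "Example hierarchical firewall policy"
#     target:: gcp_hf example-policy INGRESS mixed ga 1000
#   }
#
# i.e. the filter options carry the policy name plus optional direction
# (INGRESS/EGRESS), address family (inet/inet6/mixed), API version (beta/ga)
# and a maximum policy cost.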
def GetRuleTupleCount(dict_term: Dict[str, Any], api_version):
"""Calculate the tuple count of a rule in its dictionary form.
Quota is charged based on how complex the rules are rather than simply
limiting the number of rules.
The cost of a rule is the number of distinct protocol:port combinations plus
the number of IP addresses plus the number of targets.
Note: The goal of this function is not to determine if a rule is valid, but
to calculate its tuple count regardless of correctness.
Args:
dict_term: A dict object.
api_version: A string indicating the api version.
Returns:
int: The tuple count of the rule.
"""
layer4_count = 0
layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['layer_4_config']
dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['dest_ip_range']
src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['src_ip_range']
targets_count = len(dict_term.get('targetResources', []))
if api_version == 'ga':
config = dict_term.get('match', {})
else:
config = dict_term.get('match', {}).get('config', {})
addresses_count = len(
config.get(dest_ip_range, []) + config.get(src_ip_range, []))
for l4config in config.get(layer_4_config, []):
for _ in l4config.get('ports', []):
layer4_count += 1
if l4config.get('ipProtocol'):
      layer4_count += 1
return addresses_count + layer4_count + targets_count
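# Hedged worked example of the quota arithmetic above (not part of the original
# module): one layer-4 config with two ports, two source ranges and one target
# resource costs 1 (ipProtocol) + 2 (ports) + 2 (addresses) + 1 (target) = 6.
# The JSON key names below are assumptions made for illustration; the real ones
# come from ApiVersionSyntaxMap, which is defined earlier in this module.
def _ExampleRuleTupleCount():
  example_rule = {
      'targetResources': ['https://www.googleapis.com/compute/v1/projects/example-project/global/networks/example-vpc'],
      'match': {
          'config': {
              'srcIpRanges': ['10.0.0.0/8', '192.168.0.0/16'],
              'layer4Configs': [{'ipProtocol': 'tcp', 'ports': ['80', '443']}],
          }
      },
  }
  # Expected to evaluate to 6 if the assumed key names match the 'beta' syntax map.
  return GetRuleTupleCount(example_rule, 'beta')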
|
|
# Copyright (c) 2019 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from sys import version_info, exc_info
try:
BlockingIOError
except NameError:
# Python < 2.7.9 & < 3.4
from io import BlockingIOError # pylint: disable=redefined-builtin
from ssl import SSLError
from threading import Thread
from socket import timeout as SocketTimeout
from ..third.amqp import Connection, Message, exceptions
from .Profiler import profiled_thread
from .compat import raise_from, Event, RLock, monotonic, SocketError
from .utils import EventWithChangeTimes, validate_nonnegative_int
from .Exceptions import LinkException
DEBUG_ENABLED = logger.isEnabledFor(logging.DEBUG)
class AmqpLink(object): # pylint: disable=too-many-instance-attributes
"""Helper class to deal with AMQP connection.
"""
def __init__(self, host, vhost, prefix, epid, passwd, msg_callback, ka_callback, # pylint: disable=too-many-locals
send_ready_callback, sslca=None, prefetch=128, ackpc=0.5, heartbeat=30, socket_timeout=10,
startup_ignore_exc=False, conn_retry_delay=5, conn_error_log_threshold=180):
"""
`host`: Broker 'host:port'
`vhost`: Virtualhost name
`prefix`: username prefix for amqp login
`epid`: entity process ID
`passwd`: password
`msg_callback`: function callback for messages. Arguments: message
`ka_callback`: function callback for keepalives, Arguments: none
`send_ready_callback`: callback on send thread readiness. Arguments: last disconnection time
`sslca`: Server Certificate
`prefetch` max number of messages to get on amqp connection drain
        `ackpc` fraction of prefetch (between 0 and 1); once this many messages are unacknowledged they are acked in bulk
`heartbeat` How often (in seconds) to send AMQP heartbeat
`socket_timeout` Timeout of underlying sockets both for connection and subsequent operations
`startup_ignore_exc` On startup only, whether to ignore exceptions until socket_timeout has elapsed. This means
that e.g. an access-refused will result in a retry on startup (assuming `socket_timeout`
seconds haven't elapsed yet) rather than immediately failing.
        `conn_retry_delay` How long (in seconds) to wait between re-connection attempts when connection to broker is
lost
`conn_error_log_threshold` How long (in seconds) to delay logging connection failures at ERROR level. Until said
threshold is reached, the error messages will be logged at WARNING level.
"""
self.__host = host
self.__vhost = vhost
self.__prefix = prefix
self.__epid = epid
self.__passwd = passwd
#
self.__msg_callback = msg_callback
self.__ka_callback = ka_callback
self.__send_ready_callback = send_ready_callback
#
self.__sslca = sslca
self.__prefetch = prefetch
self.__ackpc = ackpc
self.__ack_threshold = self.__prefetch * self.__ackpc
self.__heartbeat = heartbeat
self.__socket_timeout = validate_nonnegative_int(socket_timeout, 'socket_timeout', allow_zero=False)
#
self.__unacked = 0
self.__last_id = None
#
self.__end = Event()
self.__recv_ready = EventWithChangeTimes()
self.__recv_thread = None
self.__send_ready = EventWithChangeTimes()
self.__send_lock = RLock()
self.__send_channel = None
self.__ka_channel = None
self.__send_thread = None
self.__send_exc_time = None
self.__send_exc = None # Used to pass exceptions to blocking calls EG .start
self.__recv_exc = None
# Whether to only rely on timeout on startup
self.__startup_ignore_exc = bool(startup_ignore_exc)
self.__conn_retry_delay = validate_nonnegative_int(conn_retry_delay, 'conn_retry_delay', allow_zero=False)
self.__conn_error_log_threshold = validate_nonnegative_int(conn_error_log_threshold, 'conn_error_log_threshold',
allow_zero=False)
def start(self):
"""start connection threads, blocks until started
"""
if not (self.__recv_thread or self.__send_thread):
self.__end.clear()
self.__send_ready.clear()
self.__recv_ready.clear()
timeout = self.__socket_timeout + 1
ignore_exc = self.__startup_ignore_exc
self.__send_exc_clear()
self.__recv_exc_clear()
            # start & await send thread success (unless timeout reached or an exception has occurred)
self.__send_thread = Thread(target=self.__send_run, name='amqplink_send')
self.__send_thread.start()
start_time = monotonic()
success = False
while not (success or (not ignore_exc and self.__send_exc) or monotonic() - start_time > timeout):
success = self.__send_ready.wait(.25)
if success:
# start & await receiver thread success
self.__recv_thread = Thread(target=self.__recv_run, name='amqplink_recv')
self.__recv_thread.start()
start_time = monotonic()
success = False
while not (success or (not ignore_exc and self.__recv_exc) or monotonic() - start_time >= timeout):
success = self.__recv_ready.wait(.25)
            # handle either thread's failure
if not success:
logger.warning("AmqpLink Failed to start. Giving up.")
self.stop()
if self.__recv_exc:
# prioritise receive thread since this can get access-denied whereas send does not (until sending)
raise_from(LinkException('Receive thread failure'), self.__recv_exc)
elif self.__send_exc:
raise_from(LinkException('Send thread failure'), self.__send_exc)
else:
raise LinkException('Unknown link failure (timeout reached)')
else:
raise LinkException('amqplink already started')
def is_alive(self):
"""Helper function to show if send & recv Threads are running
"""
if self.__send_ready.is_set() and self.__recv_ready.is_set():
if self.__send_thread is not None and self.__recv_thread is not None:
return self.__send_thread.is_alive() and self.__recv_thread.is_alive()
return False
def stop(self):
"""disconnect, blocks until stopped
"""
self.__end.set()
if self.__recv_thread:
self.__recv_thread.join()
self.__recv_thread = None
if self.__send_thread:
self.__send_thread.join()
self.__send_thread = None
@property
def last_send_exc_time(self):
"""Timestamp (or None) at which send thread last failed
"""
return self.__send_exc_time
def __del__(self):
self.stop()
def send(self, body, content_type='application/ubjson', timeout=5):
"""timeout indicates amount of time to wait for sending thread to be ready. set to larger than zero to wait
(in seconds, fractional) or None to block.
"""
if self.__send_ready.wait(timeout):
try:
with self.__send_lock:
                    # An access-denied response might be received inside the send thread rather than here; how best to handle that is an open question.
self.__send_channel.basic_publish(msg=Message(body, delivery_mode=2, content_type=content_type),
exchange=self.__epid)
except exceptions.AccessRefused as exc:
raise_from(LinkException('Access denied'), exc)
except (exceptions.AMQPError, SocketError) as exc:
raise_from(LinkException('amqp/transport failure'), exc)
except Exception as exc: # pylint: disable=broad-except
raise_from(LinkException('unexpected failure'), exc)
else:
exc = self.__send_exc
if exc:
raise_from(LinkException('Sender unavailable'), exc)
else:
raise LinkException('Sender unavailable (unknown error)')
@classmethod
def __get_ssl_context(cls, sslca=None):
"""Make an SSLConext for this Python version using public or sslca
"""
if ((version_info[0] == 2 and (version_info[1] >= 7 and version_info[2] >= 5)) or
(version_info[0] == 3 and version_info[1] >= 4)):
logger.debug('SSL method for 2.7.5+ / 3.4+')
# pylint: disable=no-name-in-module,import-outside-toplevel
from ssl import SSLContext, PROTOCOL_TLSv1_2, CERT_REQUIRED, OP_NO_COMPRESSION
ctx = SSLContext(PROTOCOL_TLSv1_2)
ctx.set_ciphers('HIGH:!SSLv3:!TLSv1:!aNULL:@STRENGTH')
# see CRIME security exploit
ctx.options |= OP_NO_COMPRESSION
# the following options are used to verify the identity of the broker
if sslca:
ctx.load_verify_locations(sslca)
ctx.verify_mode = CERT_REQUIRED
ctx.check_hostname = False
else:
                # Verify public certificates if sslca is None (default)
from ssl import Purpose # pylint: disable=no-name-in-module,import-outside-toplevel
ctx.load_default_certs(purpose=Purpose.SERVER_AUTH)
ctx.verify_mode = CERT_REQUIRED
ctx.check_hostname = True
elif version_info[0] == 3 and version_info[1] < 4:
logger.debug('Using SSL method for 3.2+, < 3.4')
# pylint: disable=no-name-in-module,import-outside-toplevel
from ssl import SSLContext, CERT_REQUIRED, PROTOCOL_SSLv23, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_TLSv1
ctx = SSLContext(PROTOCOL_SSLv23)
ctx.options |= (OP_NO_SSLv2 | OP_NO_SSLv3 | OP_NO_TLSv1)
ctx.set_ciphers('HIGH:!SSLv3:!TLSv1:!aNULL:@STRENGTH')
# the following options are used to verify the identity of the broker
if sslca:
ctx.load_verify_locations(sslca)
ctx.verify_mode = CERT_REQUIRED
else:
                # Verify public certificates if sslca is None (default)
ctx.set_default_verify_paths()
ctx.verify_mode = CERT_REQUIRED
else:
raise Exception("Unsupported Python version %s" % '.'.join(str(item) for item in version_info[:3]))
return ctx
def __recv_ka_cb(self, msg):
try:
if self.__recv_ready.wait(2):
self.__ka_channel.basic_publish(msg=Message(b'', delivery_mode=1), routing_key='keep-alive',
exchange=self.__epid)
else:
logger.warning('Recv thread not ready in 2 seconds, not sending KA response')
except:
logger.warning('Failed to send KA response')
try:
self.__ka_callback()
except:
logger.exception("__recv_ka_cb exception ignored.")
def __recv_cb(self, msg):
"""Calls user-provided callback and marks message for Ack regardless of success
"""
try:
self.__msg_callback(msg)
except:
logger.exception("AmqpLink.__recv_cb exception calling msg_callback")
finally:
# only works if all messages handled in series
self.__last_id = msg.delivery_tag
self.__unacked += 1
@profiled_thread # noqa (complexity)
def __recv_run(self): # pylint: disable=too-many-branches,too-many-statements
"""Main receive thread/loop
"""
while not self.__end.is_set():
self.__unacked = 0
self.__last_id = None
try:
self.__recv_ready.clear() # Ensure event is cleared for EG network failure/retry loop
with Connection(userid=self.__prefix + self.__epid,
password=self.__passwd,
virtual_host=self.__vhost,
heartbeat=self.__heartbeat,
connect_timeout=self.__socket_timeout,
operation_timeout=self.__socket_timeout,
ssl=self.__get_ssl_context(self.__sslca),
host=self.__host) as conn,\
conn.channel(auto_encode_decode=False) as channel_data,\
conn.channel() as channel_ka:
logger.debug('Connected, using cipher %s', conn.transport.sock.cipher()[0])
channel_data.basic_qos(prefetch_size=0, prefetch_count=self.__prefetch, a_global=False)
# exclusive=True. There can be only one (receiver)
msgtag = channel_data.basic_consume(queue=self.__epid, exclusive=True, callback=self.__recv_cb)
acktag = channel_ka.basic_consume(queue=('%s_ka' % self.__epid), exclusive=True, no_ack=True,
callback=self.__recv_ka_cb)
self.__ka_channel = channel_ka
self.__recv_exc_clear(log_if_exc_set='reconnected')
self.__recv_ready.set()
try:
#
# Drain loop
while not self.__end.is_set():
try:
while not self.__end.is_set() and self.__unacked < self.__ack_threshold:
# inner loop to handle all outstanding amqp messages
conn.drain_events(.1)
except SocketTimeout:
pass
# either have waited for .1s or threshold reached, so always ack
if self.__unacked:
logger.debug('acking (%d) up to %s', self.__unacked, self.__last_id)
channel_data.basic_ack(self.__last_id, multiple=True)
self.__unacked = 0
conn.heartbeat_tick()
finally:
self.__recv_ready.clear()
try:
channel_data.basic_cancel(msgtag)
channel_ka.basic_cancel(acktag)
except:
pass
except exceptions.AccessRefused:
self.__recv_log_set_exc_and_wait('Access Refused (Credentials already in use?)')
except exceptions.ConnectionForced:
self.__recv_log_set_exc_and_wait('Disconnected by broker (ConnectionForced)')
except SocketTimeout:
self.__recv_log_set_exc_and_wait('SocketTimeout exception. wrong credentials, vhost or prefix?')
except SSLError:
self.__recv_log_set_exc_and_wait('ssl.SSLError Bad Certificate?')
except (exceptions.AMQPError, SocketError):
self.__recv_log_set_exc_and_wait('amqp/transport failure, sleeping before retry')
except:
self.__recv_log_set_exc_and_wait('unexpected failure, exiting', wait_seconds=0)
break
logger.debug('finished')
def __recv_log_set_exc_and_wait(self, msg, wait_seconds=None):
"""Equivalent to __send_log_set_exc_and_wait but for receiver thread"""
logger.log(
(
logging.ERROR if self.__recv_ready.time_since_last_clear >= self.__conn_error_log_threshold else
logging.WARNING
),
msg,
exc_info=DEBUG_ENABLED
)
self.__recv_exc = exc_info()[1]
self.__end.wait(self.__conn_retry_delay if wait_seconds is None else wait_seconds)
def __recv_exc_clear(self, log_if_exc_set=None):
"""Equivalent to __send_exc_clear"""
if not (log_if_exc_set is None or self.__recv_exc is None):
logger.info(log_if_exc_set)
self.__recv_exc = None
@profiled_thread # noqa (complexity)
def __send_run(self):
"""Send request thread
"""
while not self.__end.is_set():
try:
with Connection(userid=self.__prefix + self.__epid,
password=self.__passwd,
virtual_host=self.__vhost,
heartbeat=self.__heartbeat,
connect_timeout=self.__socket_timeout,
operation_timeout=self.__socket_timeout,
ssl=self.__get_ssl_context(self.__sslca),
host=self.__host) as conn,\
conn.channel(auto_encode_decode=False) as channel:
self.__send_channel = channel
self.__send_exc_clear(log_if_exc_set='reconnected')
self.__send_ready.set()
try:
self.__send_ready_callback(self.__send_exc_time)
while not self.__end.is_set():
with self.__send_lock:
try:
# deal with any incoming messages (AMQP protocol only, not QAPI)
conn.drain_events(0)
except (BlockingIOError, SocketTimeout):
pass
conn.heartbeat_tick()
# idle
self.__end.wait(.25)
finally:
# locked so can make sure another call to send() is not made whilst shutting down
with self.__send_lock:
self.__send_ready.clear()
except exceptions.AccessRefused:
self.__send_log_set_exc_and_wait('Access Refused (Credentials already in use?)')
except exceptions.ConnectionForced:
self.__send_log_set_exc_and_wait('Disconnected by broker (ConnectionForced)')
except SocketTimeout:
self.__send_log_set_exc_and_wait('SocketTimeout exception. wrong credentials, vhost or prefix?')
except SSLError:
self.__send_log_set_exc_and_wait('ssl.SSLError Bad Certificate?')
except (exceptions.AMQPError, SocketError):
self.__send_log_set_exc_and_wait('amqp/transport failure, sleeping before retry')
except:
self.__send_log_set_exc_and_wait('unexpected failure, exiting', wait_seconds=0)
break
logger.debug('finished')
def __send_log_set_exc_and_wait(self, msg, wait_seconds=None):
"""To be called in exception context only.
msg - message to log
wait_seconds - how long to pause for (so retry is not triggered immediately)
"""
logger.log(
(
logging.ERROR if self.__send_ready.time_since_last_clear >= self.__conn_error_log_threshold else
logging.WARNING
),
msg,
exc_info=DEBUG_ENABLED
)
self.__send_exc_time = monotonic()
self.__send_exc = exc_info()[1]
self.__end.wait(self.__conn_retry_delay if wait_seconds is None else wait_seconds)
def __send_exc_clear(self, log_if_exc_set=None):
"""Clear send exception and time. If exception was previously was set, optionally log log_if_exc_set at INFO
level.
"""
if not (log_if_exc_set is None or self.__send_exc is None):
logger.info(log_if_exc_set)
self.__send_exc_time = None
self.__send_exc = None
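# Hedged usage sketch (not part of the original module): how a caller might wire
# up AmqpLink. The broker address, vhost, prefix, epid and password below are
# placeholders for illustration only; nothing here is executed on import.
def _example_amqplink_usage():
    def on_msg(message):
        # messages arrive as amqp Message objects; acking is handled internally
        logger.info('received message of %d bytes', len(message.body))

    def on_keepalive():
        logger.debug('keep-alive received from broker')

    def on_send_ready(last_failure_time):
        logger.info('send thread ready (last failure at %s)', last_failure_time)

    link = AmqpLink('broker.example.com:5671', 'example-vhost', 'prefix.', 'example-epid',
                    'not-a-real-password', on_msg, on_keepalive, on_send_ready)
    link.start()  # blocks until both threads are ready, or raises LinkException
    try:
        link.send(b'payload', content_type='application/octet-stream')
    finally:
        link.stop()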
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import glob
import optparse
import os
import posixpath
import shutil
import stat
import sys
import time
import zipfile
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
def IncludeFiles(filters, files):
"""Filter files based on inclusion lists
  Return a list of files which match any of the Unix shell-style wildcards
  provided, or return all the files if no filter is provided."""
if not filters:
return files
match = set()
for file_filter in filters:
match |= set(fnmatch.filter(files, file_filter))
return [name for name in files if name in match]
def ExcludeFiles(filters, files):
"""Filter files based on exclusions lists
Return a list of files which do not match any of the Unix shell-style
wildcards provided, or return all the files if no filter is provided."""
if not filters:
return files
match = set()
for file_filter in filters:
excludes = set(fnmatch.filter(files, file_filter))
match |= excludes
return [name for name in files if name not in match]
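# Illustrative example (not part of the original script): filters are plain
# fnmatch patterns, so
#   IncludeFiles(['*.txt'], ['a.txt', 'b.py', 'c.txt']) -> ['a.txt', 'c.txt']
#   ExcludeFiles(['*.txt'], ['a.txt', 'b.py', 'c.txt']) -> ['b.py']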
def CopyPath(options, src, dst):
"""CopyPath from src to dst
Copy a fully specified src to a fully specified dst. If src and dst are
  both files, the dst file is removed first to prevent error. If an include
  or exclude list is provided, the source is first matched against that
  filter."""
if options.includes:
if not IncludeFiles(options.includes, [src]):
return
if options.excludes:
if not ExcludeFiles(options.excludes, [src]):
return
if options.verbose:
print 'cp %s %s' % (src, dst)
# If the source is a single file, copy it individually
if os.path.isfile(src):
# We can not copy over a directory with a file.
if os.path.exists(dst):
if not os.path.isfile(dst):
msg = "cp: cannot overwrite non-file '%s' with file." % dst
raise OSError(msg)
# If the destination exists as a file, remove it before copying to avoid
# 'readonly' issues.
os.remove(dst)
# Now copy to the non-existent fully qualified target
shutil.copy(src, dst)
return
# Otherwise it's a directory, ignore it unless allowed
if os.path.isdir(src):
if not options.recursive:
print "cp: omitting directory '%s'" % src
return
# We can not copy over a file with a directory.
if os.path.exists(dst):
if not os.path.isdir(dst):
msg = "cp: cannot overwrite non-directory '%s' with directory." % dst
raise OSError(msg)
else:
# if it didn't exist, create the directory
os.makedirs(dst)
# Now copy all members
for filename in os.listdir(src):
srcfile = os.path.join(src, filename)
dstfile = os.path.join(dst, filename)
CopyPath(options, srcfile, dstfile)
return
def Copy(args):
"""A Unix cp style copy.
Copies multiple sources to a single destination using the normal cp
  semantics. In addition, it supports inclusion and exclusion filters, which
  allow the copy to skip certain types of files."""
parser = optparse.OptionParser(usage='usage: cp [Options] sources... dest')
parser.add_option(
'-R', '-r', '--recursive', dest='recursive', action='store_true',
default=False,
help='copy directories recursively.')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
default=False,
help='verbose output.')
parser.add_option(
'--include', dest='includes', action='append', default=[],
help='include files matching this expression.')
parser.add_option(
'--exclude', dest='excludes', action='append', default=[],
help='exclude files matching this expression.')
options, files = parser.parse_args(args)
if len(files) < 2:
parser.error('ERROR: expecting SOURCE(s) and DEST.')
srcs = files[:-1]
dst = files[-1]
src_list = []
for src in srcs:
files = glob.glob(src)
if not files:
raise OSError('cp: no such file or directory: ' + src)
if files:
src_list.extend(files)
for src in src_list:
# If the destination is a directory, then append the basename of the src
# to the destination.
if os.path.isdir(dst):
CopyPath(options, src, os.path.join(dst, os.path.basename(src)))
else:
CopyPath(options, src, dst)
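# Illustrative invocation (not executed here): recursively copy src/ into
# build/, skipping compiled files, roughly `cp -r` plus an exclude filter:
#   Copy(['-r', '--exclude', '*.pyc', 'src', 'build'])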
def Mkdir(args):
"""A Unix style mkdir"""
parser = optparse.OptionParser(usage='usage: mkdir [Options] DIRECTORY...')
parser.add_option(
'-p', '--parents', dest='parents', action='store_true',
default=False,
help='ignore existing parents, create parents as needed.')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
default=False,
help='verbose output.')
options, dsts = parser.parse_args(args)
if len(dsts) < 1:
parser.error('ERROR: expecting DIRECTORY...')
for dst in dsts:
if options.verbose:
print 'mkdir ' + dst
try:
os.makedirs(dst)
except OSError:
if os.path.isdir(dst):
if options.parents:
continue
raise OSError('mkdir: Already exists: ' + dst)
else:
raise OSError('mkdir: Failed to create: ' + dst)
return 0
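# Illustrative invocation (not executed here): create nested output directories,
# tolerating ones that already exist, like `mkdir -p`:
#   Mkdir(['-p', 'out/debug', 'out/release'])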
def MovePath(options, src, dst):
"""MovePath from src to dst
Moves the src to the dst much like the Unix style mv command, except it
only handles one source at a time. Because of possible temporary failures
  due to locks (such as anti-virus software on Windows), the function will retry
up to five times."""
  # If the destination is an existing directory, move the source into it.
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
  # If the destination exists, then remove it
if os.path.exists(dst):
if options.force:
Remove(['-vfr', dst])
if os.path.exists(dst):
raise OSError('mv: FAILED TO REMOVE ' + dst)
else:
raise OSError('mv: already exists ' + dst)
for _ in range(5):
try:
os.rename(src, dst)
return
except OSError as error:
print 'Failed on %s with %s, retrying' % (src, error)
time.sleep(5)
print 'Gave up.'
  raise OSError('mv: ' + str(error))
def Move(args):
parser = optparse.OptionParser(usage='usage: mv [Options] sources... dest')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
default=False,
help='verbose output.')
parser.add_option(
'-f', '--force', dest='force', action='store_true',
default=False,
      help='force, do not error if files already exist.')
options, files = parser.parse_args(args)
if len(files) < 2:
parser.error('ERROR: expecting SOURCE... and DEST.')
srcs = files[:-1]
dst = files[-1]
if options.verbose:
print 'mv %s %s' % (' '.join(srcs), dst)
for src in srcs:
MovePath(options, src, dst)
return 0
def Remove(args):
"""A Unix style rm.
  Removes the list of paths. Because of possible temporary failures due to locks
(such as anti-virus software on Windows), the function will retry up to five
times."""
parser = optparse.OptionParser(usage='usage: rm [Options] PATHS...')
parser.add_option(
'-R', '-r', '--recursive', dest='recursive', action='store_true',
default=False,
help='remove directories recursively.')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
default=False,
help='verbose output.')
parser.add_option(
'-f', '--force', dest='force', action='store_true',
default=False,
      help='force, do not error if files do not exist.')
options, files = parser.parse_args(args)
if len(files) < 1:
parser.error('ERROR: expecting FILE...')
try:
for pattern in files:
dst_files = glob.glob(pattern)
if not dst_files:
# Ignore non existing files when using force
if options.force:
continue
raise OSError('rm: no such file or directory: ' + pattern)
for dst in dst_files:
if options.verbose:
print 'rm ' + dst
if os.path.isfile(dst) or os.path.islink(dst):
for i in range(5):
try:
# Check every time, since it may have been deleted after the
# previous failed attempt.
if os.path.isfile(dst) or os.path.islink(dst):
os.remove(dst)
break
except OSError as error:
              if i == 4:
print 'Gave up.'
raise OSError('rm: ' + str(error))
print 'Failed remove with %s, retrying' % error
time.sleep(5)
if options.recursive:
for i in range(5):
try:
if os.path.isdir(dst):
shutil.rmtree(dst)
break
except OSError as error:
              if i == 4:
print 'Gave up.'
raise OSError('rm: ' + str(error))
print 'Failed rmtree with %s, retrying' % error
time.sleep(5)
except OSError as error:
print error
return 0
def MakeZipPath(os_path, isdir, iswindows):
"""Changes a path into zipfile format.
# doctest doesn't seem to honor r'' strings, so the backslashes need to be
# escaped.
>>> MakeZipPath(r'C:\\users\\foobar\\blah', False, True)
'users/foobar/blah'
>>> MakeZipPath('/tmp/tmpfoobar/something', False, False)
'tmp/tmpfoobar/something'
>>> MakeZipPath('./somefile.txt', False, False)
'somefile.txt'
>>> MakeZipPath('somedir', True, False)
'somedir/'
>>> MakeZipPath('../dir/filename.txt', False, False)
'../dir/filename.txt'
>>> MakeZipPath('dir/../filename.txt', False, False)
'filename.txt'
"""
zip_path = os_path
if iswindows:
import ntpath
# zipfile paths are always posix-style. They also have the drive
# letter and leading slashes removed.
zip_path = ntpath.splitdrive(os_path)[1].replace('\\', '/')
if zip_path.startswith('/'):
zip_path = zip_path[1:]
zip_path = posixpath.normpath(zip_path)
# zipfile also always appends a slash to a directory name.
if isdir:
zip_path += '/'
return zip_path
def OSMakeZipPath(os_path):
return MakeZipPath(os_path, os.path.isdir(os_path), sys.platform == 'win32')
def Zip(args):
"""A Unix style zip.
Compresses the listed files."""
parser = optparse.OptionParser(usage='usage: zip [Options] zipfile list')
parser.add_option(
'-r', dest='recursive', action='store_true',
default=False,
help='recurse into directories')
parser.add_option(
'-q', dest='quiet', action='store_true',
default=False,
help='quiet operation')
options, files = parser.parse_args(args)
if len(files) < 2:
parser.error('ERROR: expecting ZIPFILE and LIST.')
dest_zip = files[0]
src_args = files[1:]
src_files = []
for src_arg in src_args:
globbed_src_args = glob.glob(src_arg)
if not globbed_src_args:
if not options.quiet:
print 'zip warning: name not matched: %s' % (src_arg,)
for src_file in globbed_src_args:
src_file = os.path.normpath(src_file)
src_files.append(src_file)
if options.recursive and os.path.isdir(src_file):
for root, dirs, files in os.walk(src_file):
for dirname in dirs:
src_files.append(os.path.join(root, dirname))
for filename in files:
src_files.append(os.path.join(root, filename))
zip_stream = None
# zip_data represents a list of the data to be written or appended to the
# zip_stream. It is a list of tuples:
# (OS file path, zip path/zip file info, and file data)
# In all cases one of the |os path| or the |file data| will be None.
# |os path| is None when there is no OS file to write to the archive (i.e.
# the file data already existed in the archive). |file data| is None when the
# file is new (never existed in the archive) or being updated.
zip_data = []
new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files]
zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i])
for i in range(len(src_files)))
write_mode = 'a'
try:
zip_stream = zipfile.ZipFile(dest_zip, 'r')
files_to_update = set(new_files_to_add).intersection(
set(zip_stream.namelist()))
if files_to_update:
# As far as I can tell, there is no way to update a zip entry using
# zipfile; the best you can do is rewrite the archive.
# Iterate through the zipfile to maintain file order.
write_mode = 'w'
for zip_path in zip_stream.namelist():
if zip_path in files_to_update:
os_path = zip_path_to_os_path_dict[zip_path]
zip_data.append((os_path, zip_path, None))
new_files_to_add.remove(zip_path)
else:
file_bytes = zip_stream.read(zip_path)
file_info = zip_stream.getinfo(zip_path)
zip_data.append((None, file_info, file_bytes))
except IOError:
pass
finally:
if zip_stream:
zip_stream.close()
for zip_path in new_files_to_add:
zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None))
if not zip_data:
print 'zip error: Nothing to do! (%s)' % (dest_zip,)
return 1
try:
zip_stream = zipfile.ZipFile(dest_zip, write_mode, zipfile.ZIP_DEFLATED)
for os_path, file_info_or_zip_path, file_bytes in zip_data:
if isinstance(file_info_or_zip_path, zipfile.ZipInfo):
zip_path = file_info_or_zip_path.filename
else:
zip_path = file_info_or_zip_path
if os_path:
st = os.stat(os_path)
if stat.S_ISDIR(st.st_mode):
# Python 2.6 on the buildbots doesn't support writing directories to
# zip files. This was resolved in a later version of Python 2.6.
# We'll work around it by writing an empty file with the correct
# path. (This is basically what later versions do anyway.)
zip_info = zipfile.ZipInfo()
zip_info.filename = zip_path
zip_info.date_time = time.localtime(st.st_mtime)[0:6]
zip_info.compress_type = zip_stream.compression
zip_info.flag_bits = 0x00
zip_info.external_attr = (st[0] & 0xFFFF) << 16L
zip_info.CRC = 0
zip_info.compress_size = 0
zip_info.file_size = 0
zip_stream.writestr(zip_info, '')
else:
zip_stream.write(os_path, zip_path)
else:
zip_stream.writestr(file_info_or_zip_path, file_bytes)
if not options.quiet:
if zip_path in new_files_to_add:
operation = 'adding'
else:
operation = 'updating'
zip_info = zip_stream.getinfo(zip_path)
if (zip_info.compress_type == zipfile.ZIP_STORED or
zip_info.file_size == 0):
print ' %s: %s (stored 0%%)' % (operation, zip_path)
elif zip_info.compress_type == zipfile.ZIP_DEFLATED:
print ' %s: %s (deflated %d%%)' % (operation, zip_path,
100 - zip_info.compress_size * 100 / zip_info.file_size)
finally:
zip_stream.close()
return 0
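# Illustrative invocation (not executed here): build or update out/app.zip from
# the contents of the assets/ directory, recursing into sub-directories:
#   Zip(['-r', 'out/app.zip', 'assets'])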
def FindExeInPath(filename):
env_path = os.environ.get('PATH', '')
paths = env_path.split(os.pathsep)
def IsExecutableFile(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
if os.path.sep in filename:
if IsExecutableFile(filename):
return filename
for path in paths:
filepath = os.path.join(path, filename)
if IsExecutableFile(filepath):
return os.path.abspath(os.path.join(path, filename))
def Which(args):
"""A Unix style which.
Looks for all arguments in the PATH environment variable, and prints their
path if they are executable files.
Note: If you pass an argument with a path to which, it will just test if it
is executable, not if it is in the path.
"""
parser = optparse.OptionParser(usage='usage: which args...')
_, files = parser.parse_args(args)
if not files:
return 0
retval = 0
for filename in files:
fullname = FindExeInPath(filename)
if fullname:
print fullname
else:
retval = 1
return retval
FuncMap = {
'cp': Copy,
'mkdir': Mkdir,
'mv': Move,
'rm': Remove,
'zip': Zip,
'which': Which,
}
def main(args):
if not args:
print 'No command specified'
print 'Available commands: %s' % ' '.join(FuncMap)
return 1
func_name = args[0]
func = FuncMap.get(func_name)
if not func:
print 'Do not recognize command: %s' % func_name
print 'Available commands: %s' % ' '.join(FuncMap)
return 1
try:
return func(args[1:])
except KeyboardInterrupt:
print '%s: interrupted' % func_name
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
#!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<[email protected]>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from myhdl import Signal
from myhdl import always_comb
from myhdl import modbv
from myhdl import always
from myhdl import instances
from Core.consts import Consts
from Core.regfile import RegisterFile
from Core.regfile import RFReadPort
from Core.regfile import RFWritePort
from Core.alu import ALU
from Core.alu import ALUOp
from Core.alu import ALUPortIO
from Core.csr import CSR
from Core.csr import CSRFileRWIO
from Core.csr import CSRCMD
from Core.csr import CSRExceptionIO
from Core.csr import CSRAddressMap
from Core.imm_gen import IMMGen
from Core.mux import Mux4
from Core.mux import Mux2
def Datapath(clk,
rst,
ctrlIO,
toHost):
"""
A 5-stage data path with data forwarding.
:param clk: System clock
:param rst: System reset
:param ctrlIO: IO bundle. Interface with the cpath module
:param toHost: Connected to the CSR's mtohost register. For simulation purposes.
"""
a_pc = Signal(modbv(0)[32:])
if_pc = Signal(modbv(0)[32:])
if_instruction = Signal(modbv(0)[32:])
if_pc_next = Signal(modbv(0)[32:])
id_pc = Signal(modbv(0)[32:])
id_instruction = Signal(modbv(0)[32:])
id_rf_portA = RFReadPort()
id_rf_portB = RFReadPort()
id_imm = Signal(modbv(0)[32:])
id_rs1_data = Signal(modbv(0)[32:])
id_rs2_data = Signal(modbv(0)[32:])
id_op1 = Signal(modbv(0)[32:])
id_op2 = Signal(modbv(0)[32:])
id_op1_data = Signal(modbv(0)[32:])
id_op2_data = Signal(modbv(0)[32:])
id_mem_wdata = Signal(modbv(0)[32:])
id_pc_brjmp = Signal(modbv(0)[32:])
id_pc_jalr = Signal(modbv(0)[32:])
id_wb_addr = Signal(modbv(0)[5:])
id_csr_addr = Signal(modbv(0)[CSRAddressMap.SZ_ADDR:])
id_csr_wdata = Signal(modbv(0)[32:])
id_csr_cmd = Signal(modbv(0)[CSRCMD.SZ_CMD:])
ex_pc = Signal(modbv(0)[32:])
ex_data_out = Signal(modbv(0)[32:])
ex_alu_funct = Signal(modbv(0)[ALUOp.SZ_OP:])
ex_mem_wdata = Signal(modbv(0)[32:])
ex_mem_type = Signal(modbv(0)[Consts.SZ_MT:])
ex_mem_funct = Signal(False)
ex_mem_valid = Signal(False)
ex_mem_data_sel = Signal(modbv(0)[Consts.SZ_WB:])
ex_wb_addr = Signal(modbv(0)[5:])
ex_wb_we = Signal(False)
ex_op1_data = Signal(modbv(0)[32:])
ex_op2_data = Signal(modbv(0)[32:])
aluIO = ALUPortIO()
ex_csr_addr = Signal(modbv(0)[CSRAddressMap.SZ_ADDR:])
ex_csr_wdata = Signal(modbv(0)[32:])
ex_csr_cmd = Signal(modbv(0)[CSRCMD.SZ_CMD:])
exc_pc = Signal(modbv(0)[32:])
mem_pc = Signal(modbv(0)[32:])
mem_alu_out = Signal(modbv(0)[32:])
mem_mem_wdata = Signal(modbv(0)[32:])
mem_mem_type = Signal(modbv(0)[Consts.SZ_MT:])
mem_mem_funct = Signal(False)
mem_mem_valid = Signal(False)
mem_mem_data_sel = Signal(modbv(0)[Consts.SZ_WB:])
mem_wb_addr = Signal(modbv(0)[5:])
mem_wb_wdata = Signal(modbv(0)[32:])
mem_wb_we = Signal(False)
csr_rw = CSRFileRWIO()
csr_exc_io = CSRExceptionIO()
mem_mem_data = Signal(modbv(0)[32:])
mem_csr_addr = Signal(modbv(0)[CSRAddressMap.SZ_ADDR:])
mem_csr_wdata = Signal(modbv(0)[32:])
mem_csr_rdata = Signal(modbv(0)[32:])
mem_csr_cmd = Signal(modbv(0)[CSRCMD.SZ_CMD:])
wb_pc = Signal(modbv(0)[32:])
wb_wb_addr = Signal(modbv(0)[5:])
wb_wb_wdata = Signal(modbv(0)[32:])
wb_wb_we = Signal(False)
wb_rf_writePort = RFWritePort()
# A stage
# ----------------------------------------------------------------------
pc_mux = Mux4(ctrlIO.pc_select, # noqa
if_pc_next,
id_pc_brjmp,
id_pc_jalr,
exc_pc,
a_pc)
# IF stage
# ----------------------------------------------------------------------
@always(clk.posedge)
def pc():
if rst == 1:
if_pc.next = Consts.START_ADDR
else:
if (not ctrlIO.id_stall and not ctrlIO.full_stall) | ctrlIO.pipeline_kill:
if_pc.next = a_pc
@always_comb
def _pc_next():
ctrlIO.imem_pipeline.addr.next = if_pc
if_pc_next.next = if_pc + 4
if_instruction.next = ctrlIO.imem_pipeline.rdata
ctrlIO.imem_pipeline.wdata.next = 0xDEADC0DE
ctrlIO.imem_pipeline.typ.next = Consts.MT_W
ctrlIO.imem_pipeline.fcn.next = Consts.M_RD
ctrlIO.imem_pipeline.valid.next = True
# ID stage
# ----------------------------------------------------------------------
@always(clk.posedge)
def ifid():
if rst == 1:
id_pc.next = 0
id_instruction.next = Consts.BUBBLE
else:
id_pc.next = (id_pc if ctrlIO.id_stall or ctrlIO.full_stall else (if_pc))
id_instruction.next = (id_instruction if ctrlIO.id_stall or ctrlIO.full_stall else
(Consts.BUBBLE if ctrlIO.pipeline_kill or ctrlIO.if_kill else
(if_instruction)))
reg_file = RegisterFile(clk, # noqa
id_rf_portA,
id_rf_portB,
wb_rf_writePort)
op1_data_fwd = Mux4(ctrlIO.id_fwd1_select, # noqa
id_rs1_data,
ex_data_out,
mem_wb_wdata,
wb_wb_wdata,
id_op1)
op2_data_fwd = Mux4(ctrlIO.id_fwd2_select, # noqa
id_rs2_data,
ex_data_out,
mem_wb_wdata,
wb_wb_wdata,
id_op2)
imm_gen = IMMGen(ctrlIO.id_sel_imm, # noqa
id_instruction,
id_imm)
op1_mux = Mux4(ctrlIO.id_op1_select, # noqa
id_op1,
id_pc,
0x00000000,
0x00000BAD,
id_op1_data)
op2_mux = Mux4(ctrlIO.id_op2_select, # noqa
id_op2,
id_imm,
0x00000004,
0x00000000,
id_op2_data)
@always_comb
def _id_assignment():
ctrlIO.id_instruction.next = id_instruction
id_rf_portA.ra.next = id_instruction[20:15]
id_rf_portB.ra.next = id_instruction[25:20]
ctrlIO.id_rs1_addr.next = id_instruction[20:15]
ctrlIO.id_rs2_addr.next = id_instruction[25:20]
id_rs1_data.next = id_rf_portA.rd
id_rs2_data.next = id_rf_portB.rd
id_wb_addr.next = id_instruction[12:7]
id_csr_addr.next = id_instruction[32:20]
id_mem_wdata.next = id_op2
id_pc_brjmp.next = id_pc.signed() + id_imm.signed()
id_pc_jalr.next = (id_op1.signed() + id_imm.signed()) & ~0x01
id_csr_addr.next = id_instruction[32:20]
id_csr_cmd.next = ctrlIO.id_csr_cmd
id_csr_wdata.next = id_instruction[20:15] if id_instruction[14] else id_op1
ctrlIO.id_next_pc.next = a_pc
ctrlIO.csr_interrupt.next = csr_exc_io.interrupt
ctrlIO.csr_interrupt_code.next = csr_exc_io.interrupt_code
ctrlIO.id_op1.next = id_op1
ctrlIO.id_op2.next = id_op2
# EX stage
# ----------------------------------------------------------------------
@always(clk.posedge)
def idex():
if rst == 1:
ex_pc.next = 0
ex_op1_data.next = 0
ex_op2_data.next = 0
ex_alu_funct.next = ALUOp.OP_ADD
ex_mem_type.next = Consts.MT_X
ex_mem_funct.next = Consts.M_X
ex_mem_valid.next = False
ex_mem_wdata.next = 0
ex_mem_data_sel.next = Consts.WB_X
ex_wb_addr.next = 0
ex_wb_we.next = False
ex_csr_addr.next = 0
ex_csr_wdata.next = 0
ex_csr_cmd.next = CSRCMD.CSR_IDLE
else:
ex_pc.next = (ex_pc if (ctrlIO.id_stall or ctrlIO.full_stall) else (id_pc))
ex_op1_data.next = (ex_op1_data if (ctrlIO.id_stall or ctrlIO.full_stall) else (id_op1_data))
ex_op2_data.next = (ex_op2_data if (ctrlIO.id_stall or ctrlIO.full_stall) else (id_op2_data))
ex_alu_funct.next = (ex_alu_funct if (ctrlIO.id_stall or ctrlIO.full_stall) else (ctrlIO.id_alu_funct))
ex_mem_type.next = (ex_mem_type if (ctrlIO.id_stall or ctrlIO.full_stall) else (ctrlIO.id_mem_type))
ex_mem_wdata.next = (ex_mem_wdata if (ctrlIO.id_stall or ctrlIO.full_stall) else (id_mem_wdata))
ex_mem_data_sel.next = (ex_mem_data_sel if (ctrlIO.id_stall or ctrlIO.full_stall) else (ctrlIO.id_mem_data_sel))
ex_wb_addr.next = (ex_wb_addr if (ctrlIO.id_stall or ctrlIO.full_stall) else (id_wb_addr))
ex_csr_addr.next = (ex_csr_addr if (ctrlIO.id_stall or ctrlIO.full_stall) else (id_csr_addr))
ex_csr_wdata.next = (ex_csr_wdata if (ctrlIO.id_stall or ctrlIO.full_stall) else (id_csr_wdata))
ex_mem_funct.next = (ex_mem_funct if ctrlIO.full_stall else
(Consts.M_X if (ctrlIO.pipeline_kill or ctrlIO.id_kill or (ctrlIO.id_stall and not ctrlIO.full_stall)) else
(ctrlIO.id_mem_funct)))
ex_mem_valid.next = (ex_mem_valid if ctrlIO.full_stall else
(False if (ctrlIO.pipeline_kill or ctrlIO.id_kill or (ctrlIO.id_stall and not ctrlIO.full_stall)) else
(ctrlIO.id_mem_valid)))
ex_wb_we.next = (ex_wb_we if ctrlIO.full_stall else
(False if (ctrlIO.pipeline_kill or ctrlIO.id_kill or (ctrlIO.id_stall and not ctrlIO.full_stall)) else
(ctrlIO.id_wb_we)))
ex_csr_cmd.next = (ex_csr_cmd if ctrlIO.full_stall else
(modbv(CSRCMD.CSR_IDLE)[CSRCMD.SZ_CMD:] if (ctrlIO.pipeline_kill or ctrlIO.id_kill or (ctrlIO.id_stall and not ctrlIO.full_stall)) else
(id_csr_cmd)))
alu = ALU(clk, rst, aluIO) # noqa
@always_comb
def _ex_assignments():
aluIO.input1.next = ex_op1_data
aluIO.input2.next = ex_op2_data
aluIO.function.next = ex_alu_funct
aluIO.stall.next = ctrlIO.full_stall
aluIO.kill.next = ctrlIO.pipeline_kill
ex_data_out.next = aluIO.output
ctrlIO.ex_req_stall.next = aluIO.req_stall
ctrlIO.ex_wb_we.next = ex_wb_we
ctrlIO.ex_wb_addr.next = ex_wb_addr
# MEM stage
# ----------------------------------------------------------------------
@always(clk.posedge)
def exmem():
if rst == 1:
mem_pc.next = 0
mem_mem_valid.next = False
mem_alu_out.next = 0
mem_mem_wdata.next = 0
mem_mem_type.next = Consts.MT_X
mem_mem_funct.next = Consts.M_X
mem_mem_data_sel.next = Consts.WB_X
mem_wb_addr.next = 0
mem_wb_we.next = False
mem_csr_addr.next = 0
mem_csr_wdata.next = 0
mem_csr_cmd.next = CSRCMD.CSR_IDLE
else:
mem_pc.next = (mem_pc if ctrlIO.full_stall else ex_pc)
mem_alu_out.next = (mem_alu_out if ctrlIO.full_stall else ex_data_out)
mem_mem_wdata.next = (mem_mem_wdata if ctrlIO.full_stall else ex_mem_wdata)
mem_mem_type.next = (mem_mem_type if ctrlIO.full_stall else ex_mem_type)
mem_mem_funct.next = (mem_mem_funct if ctrlIO.full_stall else ex_mem_funct)
mem_mem_data_sel.next = (mem_mem_data_sel if ctrlIO.full_stall else ex_mem_data_sel)
mem_wb_addr.next = (mem_wb_addr if ctrlIO.full_stall else ex_wb_addr)
mem_csr_addr.next = (mem_csr_addr if ctrlIO.full_stall else ex_csr_addr)
mem_csr_wdata.next = (mem_csr_wdata if ctrlIO.full_stall else ex_csr_wdata)
mem_mem_valid.next = (mem_mem_valid if ctrlIO.full_stall else (False if ctrlIO.pipeline_kill else ex_mem_valid))
mem_wb_we.next = (mem_wb_we if ctrlIO.full_stall else (False if ctrlIO.pipeline_kill else ex_wb_we))
mem_csr_cmd.next = (mem_csr_cmd if (ctrlIO.full_stall) else (modbv(CSRCMD.CSR_IDLE)[CSRCMD.SZ_CMD:] if ctrlIO.pipeline_kill else ex_csr_cmd))
csr = CSR(clk, # noqa
rst,
csr_rw,
csr_exc_io,
ctrlIO.csr_retire,
ctrlIO.csr_prv,
ctrlIO.csr_illegal_access,
ctrlIO.full_stall,
toHost)
mdata_mux = Mux4(mem_mem_data_sel, # noqa
mem_alu_out,
mem_mem_data,
mem_csr_rdata,
0x0BADF00D,
mem_wb_wdata)
exc_pc_mux = Mux2(ctrlIO.csr_eret, # noqa
csr_exc_io.exception_handler,
csr_exc_io.epc,
exc_pc)
@always_comb
def _mem_assignments():
ctrlIO.dmem_pipeline.addr.next = mem_alu_out
ctrlIO.dmem_pipeline.wdata.next = mem_mem_wdata
ctrlIO.dmem_pipeline.fcn.next = mem_mem_funct
ctrlIO.dmem_pipeline.typ.next = mem_mem_type
ctrlIO.dmem_pipeline.valid.next = mem_mem_valid
mem_mem_data.next = ctrlIO.dmem_pipeline.rdata
csr_exc_io.exception.next = ctrlIO.csr_exception
csr_exc_io.exception_code.next = ctrlIO.csr_exception_code
csr_exc_io.eret.next = ctrlIO.csr_eret
csr_exc_io.exception_load_addr.next = mem_alu_out
csr_exc_io.exception_pc.next = mem_pc
csr_rw.addr.next = mem_csr_addr
csr_rw.cmd.next = mem_csr_cmd
csr_rw.wdata.next = mem_csr_wdata
mem_csr_rdata.next = csr_rw.rdata
ctrlIO.mem_wb_we.next = mem_wb_we
ctrlIO.mem_wb_addr.next = mem_wb_addr
# WB stage
# ----------------------------------------------------------------------
@always(clk.posedge)
def memwb():
if rst == 1:
wb_pc.next = 0
wb_wb_addr.next = 0
wb_wb_wdata.next = 0
wb_wb_we.next = False
else:
wb_pc.next = (wb_pc if ctrlIO.full_stall else mem_pc)
wb_wb_addr.next = (wb_wb_addr if ctrlIO.full_stall else mem_wb_addr)
wb_wb_wdata.next = (wb_wb_wdata if ctrlIO.full_stall else mem_wb_wdata)
wb_wb_we.next = (wb_wb_we if ctrlIO.full_stall else (False if ctrlIO.pipeline_kill else mem_wb_we))
@always_comb
def _wb_assignments():
wb_rf_writePort.wa.next = wb_wb_addr
wb_rf_writePort.wd.next = wb_wb_wdata
wb_rf_writePort.we.next = wb_wb_we
ctrlIO.wb_wb_we.next = wb_wb_we
ctrlIO.wb_wb_addr.next = wb_wb_addr
return instances()
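# Hedged simulation sketch (not part of the original design): a conventional
# MyHDL clock/reset driver that a testbench for Datapath could reuse. The
# period and reset length below are arbitrary illustration values.
def clk_rst_driver(clk, rst, period=10, reset_cycles=5):
    from myhdl import delay, instance

    @always(delay(period // 2))
    def clkgen():
        # free-running clock toggled every half period
        clk.next = not clk

    @instance
    def rstgen():
        # hold reset high for a few cycles, then release it
        rst.next = True
        yield delay(period * reset_cycles)
        rst.next = False

    return clkgen, rstgen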
# Local Variables:
# flycheck-flake8-maximum-line-length: 200
# flycheck-flake8rc: ".flake8rc"
# End:
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Microsoft Corporation. All rights reserved.
# Released under Apache 2.0 license as described in the file LICENSE.
#
# Authors: Soonho Kong, Leonardo de Moura, Ulrik Buchholtz
# Python 2/3 compatibility
from __future__ import print_function
import os
import sys
import getopt
import subprocess
import platform
import graphviz
def find_lean():
lean_path = None
if platform.system() == "Windows" or platform.system().startswith("MSYS"):
lean_exec_name = "lean.exe"
else:
lean_exec_name = "lean"
# Check whether lean_exec_name is in the $PATH
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, lean_exec_name)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
            lean_path = exe_file
break
if lean_path == None:
        # lean_exec_name is not in the $PATH,
# so assume we're being called from "extras/depgraph"
if platform.system().startswith("MSYS"):
# In MSYS platform, realpath has a strange behavior.
# os.path.realpath("c:\a\b\c") => \:\a\b\c
extras_depgraph_leandeps_path = os.path.abspath(os.path.normpath(__file__))
else:
extras_depgraph_leandeps_path = os.path.abspath(os.path.realpath(__file__))
lean_dir = os.path.dirname(os.path.dirname(os.path.dirname(extras_depgraph_leandeps_path)))
lean_path = os.path.join(lean_dir, "bin", lean_exec_name)
if not (os.path.isfile(lean_path) and os.access(lean_path, os.X_OK)):
print("cannot find lean executable at ", os.path.abspath(lean_path), file=sys.stderr)
sys.exit(2)
return lean_path
g_lean_path = find_lean()
class lean_exception(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def normalize_drive_name(name):
if platform.system() == "Windows":
drive, path = os.path.splitdrive(name)
if not drive:
return name
else:
# Leo: return drive.lower() + path
return path
else:
return name
def is_olean(fname):
return fname.endswith(".olean")
def is_lean(fname):
return fname.endswith(".lean")
def is_hlean(fname):
return fname.endswith(".hlean")
LEAN_KIND=0
HLEAN_KIND=1
OLEAN_KIND=2
def get_lean_file_kind(fname):
if is_lean(fname):
return LEAN_KIND
elif is_hlean(fname):
return HLEAN_KIND
elif is_olean(fname):
return OLEAN_KIND
else:
raise lean_exception("unknown file kind: " + fname)
def olean_to_lean(fname, kind):
if kind == LEAN_KIND:
return fname[:-5] + "lean"
elif kind == HLEAN_KIND:
return fname[:-5] + "hlean"
else:
raise lean_exception("unsupported file kind: " + kind)
def lean_to_olean(fname):
if is_lean(fname):
return fname[:-4] + "olean"
elif is_hlean(fname):
return fname[:-5] + "olean"
else:
raise lean_exception("file '%s' is not a lean source file" % fname)
def lean_direct_deps(lean_file):
deps = []
proc = subprocess.Popen([g_lean_path, "--deps", lean_file],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()[0].decode('utf-8').replace('\r\n', '\n')
if not proc.returncode == 0:
raise lean_exception(str(output))
for olean_file in output.strip().splitlines():
if olean_file:
deps.append(normalize_drive_name(os.path.abspath(olean_file)))
return deps
def get_lean_prefixes():
paths = []
proc = subprocess.Popen([g_lean_path, "--path"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()[0].decode('utf-8').replace('\r\n', '\n')
if not proc.returncode == 0:
raise lean_exception(str(output))
for p in output.rstrip().split(':'):
paths.append(os.path.normpath(os.path.abspath(p)))
proc = subprocess.Popen([g_lean_path, "--hlean", "--path"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()[0].decode('utf-8').replace('\r\n', '\n')
if not proc.returncode == 0:
raise lean_exception(str(output))
for p in output.rstrip().split(':'):
paths.append(os.path.normpath(os.path.abspath(p)))
return paths
def lean_to_module(fname, prefixes):
root, ext = os.path.splitext(fname)
for prefix in prefixes:
if root.startswith(prefix):
root = root[len(prefix)+1:]
break
return root.replace(os.sep, '.')
def lean_deps_core(lean_files, prefixes, visited, graph):
for lean_file in lean_files:
kind = get_lean_file_kind(lean_file)
if not lean_file in visited:
visited[lean_file] = True
graph.node(lean_to_module(lean_file, prefixes))
for d in lean_direct_deps(lean_file):
d = os.path.normpath(os.path.abspath(str(d)))
if is_olean(d):
d = olean_to_lean(d, kind)
graph.edge(lean_to_module(lean_file, prefixes), lean_to_module(d, prefixes))
lean_deps_core([d], prefixes, visited, graph)
def lean_deps(lean_files, prefixes, oname):
visited = dict()
graph = graphviz.Digraph(name=oname,format='dot')
lean_deps_core(lean_files, prefixes, visited, graph)
graph.render()
def usage():
print('Usage: '+sys.argv[0]+' [options] dir/file')
print("\nIf argument is a directory, all source files below that directory")
print("will be included in the graph.")
print("\n -h/--help : prints this message")
print(" -o/--output file : saves the DOT output in the specified file")
print("If no output file is specified, deps.gv and deps.gv.dot is written to.")
def main(argv):
oname = "deps"
try:
opts, args = getopt.getopt(argv, "ho:", ["help","output="])
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-o", "--output"):
oname = arg
if len(args) != 1:
print(" Input argument required!")
usage()
sys.exit(2)
leanfiles = []
prefixes = get_lean_prefixes()
if os.path.isdir(args[0]):
for root, dirs, files in os.walk(args[0]):
for name in files:
if is_lean(name) or is_hlean(name):
leanfiles.append(os.path.abspath(os.path.normpath(os.path.join(root, name))))
prefixes.append(os.path.abspath(os.path.normpath(root)))
elif is_lean(args[0]) or is_hlean(args[0]):
leanfiles = [os.path.abspath(os.path.normpath(args[0]))]
else:
usage()
sys.exit(2)
lean_deps(leanfiles, prefixes, oname)
if __name__ == "__main__":
main(sys.argv[1:])
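# A usage sketch (hypothetical paths): generate the dependency graph for a
# directory of .lean/.hlean sources, then post-process the emitted DOT file
# with graphviz's `dot` tool.
#
#   python leandeps.py -o deps library/data/nat
#   dot -Tpdf deps.gv.dot -o deps.pdf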
|
|
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
import logging
import re
from datetime import datetime
try:
from enum import Enum
except ImportError:
from enum34 import Enum
from itertools import repeat
import numpy as np
from .tools import Bits, IOBuffer, NamedStruct, zlib_decompress_all_frames
from .cdm import Dataset, cf_to_proj
from ..cbook import is_string_like
from ..package_tools import Exporter
exporter = Exporter(globals())
log = logging.getLogger('metpy.io.gini')
log.addHandler(logging.StreamHandler()) # Python 2.7 needs a handler set
log.setLevel(logging.WARN)
def _make_datetime(s):
r'Converts 7 bytes from a GINI file to a `datetime` instance.'
s = bytearray(s) # For Python 2
year, month, day, hour, minute, second, cs = s
return datetime(1900 + year, month, day, hour, minute, second, 10000 * cs)
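# A worked example for the decoder above (hypothetical byte values): the year
# byte is an offset from 1900 and the last byte is hundredths of a second, so
#   _make_datetime(bytearray([117, 6, 1, 12, 30, 45, 50]))
# yields datetime(2017, 6, 1, 12, 30, 45, 500000).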
def _scaled_int(s):
r'Converts a 3 byte string to a signed integer value'
s = bytearray(s) # For Python 2
# Get leftmost bit (sign) as 1 (if 0) or -1 (if 1)
sign = 1 - ((s[0] & 0x80) >> 6)
# Combine remaining bits
int_val = (((s[0] & 0x7f) << 16) | (s[1] << 8) | s[2])
log.debug('Source: %s Int: %x Sign: %d', ' '.join(hex(c) for c in s), int_val, sign)
# Return scaled and with proper sign
return (sign * int_val) / 10000.
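# For example (hypothetical bytes), 0x002710 is 10000, which scales to 1.0,
# and setting the high (sign) bit of the first byte negates it:
#   _scaled_int(b'\x00\x27\x10')  # ->  1.0
#   _scaled_int(b'\x80\x27\x10')  # -> -1.0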
def _name_lookup(names):
r'Creates an io helper to convert an integer to a named value.'
mapper = dict(zip(range(len(names)), names))
def lookup(val):
return mapper.get(val, 'Unknown')
return lookup
class GiniProjection(Enum):
r'Represents projection values in GINI files'
mercator = 1
lambert_conformal = 3
polar_stereographic = 5
@exporter.export
class GiniFile(object):
r'''A class that handles reading the GINI format satellite images from the NWS.
This class attempts to decode every byte that is in a given GINI file.
Notes
-----
The internal data structures that things are decoded into are subject to change. For
a more stable interface, use the :meth:`to_dataset` method.
See Also
--------
GiniFile.to_dataset
'''
missing = 255
wmo_finder = re.compile(r'(T\w{3}\d{2})[\s\w\d]+\w*(\w{3})\r\r\n')
crafts = ['Unknown', 'Unknown', 'Miscellaneous', 'JERS', 'ERS/QuikSCAT', 'POES/NPOESS',
'Composite', 'DMSP', 'GMS', 'METEOSAT', 'GOES-7', 'GOES-8', 'GOES-9',
'GOES-10', 'GOES-11', 'GOES-12', 'GOES-13', 'GOES-14', 'GOES-15', 'GOES-16']
sectors = ['NH Composite', 'East CONUS', 'West CONUS', 'Alaska Regional',
'Alaska National', 'Hawaii Regional', 'Hawaii National', 'Puerto Rico Regional',
'Puerto Rico National', 'Supernational', 'NH Composite', 'Central CONUS',
'East Floater', 'West Floater', 'Central Floater', 'Polar Floater']
channels = ['Unknown', 'Visible', 'IR (3.9 micron)', 'WV (6.5/6.7 micron)',
'IR (11 micron)', 'IR (12 micron)', 'IR (13 micron)', 'IR (1.3 micron)',
'Reserved', 'Reserved', 'Reserved', 'Reserved', 'Reserved', 'LI (Imager)',
'PW (Imager)', 'Surface Skin Temp (Imager)', 'LI (Sounder)', 'PW (Sounder)',
'Surface Skin Temp (Sounder)', 'CAPE', 'Land-sea Temp', 'WINDEX',
'Dry Microburst Potential Index', 'Microburst Day Potential Index',
'Convective Inhibition', 'Volcano Imagery', 'Scatterometer', 'Cloud Top',
'Cloud Amount', 'Rainfall Rate', 'Surface Wind Speed', 'Surface Wetness',
'Ice Concentration', 'Ice Type', 'Ice Edge', 'Cloud Water Content',
'Surface Type', 'Snow Indicator', 'Snow/Water Content', 'Volcano Imagery',
'Reserved', 'Sounder (14.71 micron)', 'Sounder (14.37 micron)',
'Sounder (14.06 micron)', 'Sounder (13.64 micron)', 'Sounder (13.37 micron)',
'Sounder (12.66 micron)', 'Sounder (12.02 micron)', 'Sounder (11.03 micron)',
'Sounder (9.71 micron)', 'Sounder (7.43 micron)', 'Sounder (7.02 micron)',
'Sounder (6.51 micron)', 'Sounder (4.57 micron)', 'Sounder (4.52 micron)',
'Sounder (4.45 micron)', 'Sounder (4.13 micron)', 'Sounder (3.98 micron)',
'Sounder (3.74 micron)', 'Sounder (Visible)']
prod_desc_fmt = NamedStruct([('source', 'b'),
('creating_entity', 'b', _name_lookup(crafts)),
('sector_id', 'b', _name_lookup(sectors)),
('channel', 'b', _name_lookup(channels)),
('num_records', 'H'), ('record_len', 'H'),
('datetime', '7s', _make_datetime),
('projection', 'b', GiniProjection), ('nx', 'H'), ('ny', 'H'),
('la1', '3s', _scaled_int), ('lo1', '3s', _scaled_int)
], '>', 'ProdDescStart')
lc_ps_fmt = NamedStruct([('reserved', 'b'), ('lov', '3s', _scaled_int),
('dx', '3s', _scaled_int), ('dy', '3s', _scaled_int),
('proj_center', 'b')], '>', 'LambertOrPolarProjection')
mercator_fmt = NamedStruct([('resolution', 'b'), ('la2', '3s', _scaled_int),
('lo2', '3s', _scaled_int), ('di', 'H'), ('dj', 'H')
], '>', 'MercatorProjection')
prod_desc2_fmt = NamedStruct([('scanning_mode', 'b', Bits(3)),
('lat_in', '3s', _scaled_int), ('resolution', 'b'),
('compression', 'b'), ('version', 'b'), ('pdb_size', 'H'),
('nav_cal', 'b')], '>', 'ProdDescEnd')
nav_fmt = NamedStruct([('sat_lat', '3s', _scaled_int), ('sat_lon', '3s', _scaled_int),
('sat_height', 'H'), ('ur_lat', '3s', _scaled_int),
('ur_lon', '3s', _scaled_int)], '>', 'Navigation')
def __init__(self, filename):
r'''Create instance of `GiniFile`.
Parameters
----------
filename : str or file-like object
If str, the name of the file to be opened. Gzip-ed files are
recognized with the extension ``'.gz'``, as are bzip2-ed files with
the extension ``'.bz2'``. If `filename` is a file-like object,
this will be read from directly.
'''
if is_string_like(filename):
fobj = open(filename, 'rb')
self.filename = filename
else:
fobj = filename
self.filename = "No Filename"
# Just read in the entire set of data at once
self._buffer = IOBuffer.fromfile(fobj)
# Pop off the WMO header if we find it
self.wmo_code = ''
self._process_wmo_header()
log.debug('First wmo code: %s', self.wmo_code)
# Decompress the data if necessary, and if so, pop off new header
log.debug('Length before decompression: %s', len(self._buffer))
self._buffer = IOBuffer(self._buffer.read_func(zlib_decompress_all_frames))
log.debug('Length after decompression: %s', len(self._buffer))
# Process WMO header inside compressed data if necessary
self._process_wmo_header()
log.debug('2nd wmo code: %s', self.wmo_code)
# Read product description start
start = self._buffer.set_mark()
#: :desc: Decoded first section of product description block
#: :type: namedtuple
self.prod_desc = self._buffer.read_struct(self.prod_desc_fmt)
log.debug(self.prod_desc)
#: :desc: Decoded geographic projection information
#: :type: namedtuple
self.proj_info = None
# Handle projection-dependent parts
if self.prod_desc.projection in (GiniProjection.lambert_conformal,
GiniProjection.polar_stereographic):
self.proj_info = self._buffer.read_struct(self.lc_ps_fmt)
elif self.prod_desc.projection == GiniProjection.mercator:
self.proj_info = self._buffer.read_struct(self.mercator_fmt)
else:
log.warning('Unknown projection: %d', self.prod_desc.projection)
log.debug(self.proj_info)
# Read the rest of the guaranteed product description block (PDB)
#: :desc: Decoded second section of product description block
#: :type: namedtuple
self.prod_desc2 = self._buffer.read_struct(self.prod_desc2_fmt)
log.debug(self.prod_desc2)
if self.prod_desc2.nav_cal != 0:
# Only warn if there actually seems to be useful navigation data
if self._buffer.get_next(self.nav_fmt.size) != b'\x00' * self.nav_fmt.size:
log.warning('Navigation/Calibration unhandled: %d', self.prod_desc2.nav_cal)
if self.prod_desc2.nav_cal in (1, 2):
self.navigation = self._buffer.read_struct(self.nav_fmt)
log.debug(self.navigation)
# Catch bad PDB with size set to 0
if self.prod_desc2.pdb_size == 0:
log.warning('Adjusting bad PDB size from 0 to 512.')
self.prod_desc2 = self.prod_desc2._replace(pdb_size=512)
# Jump past the remaining empty bytes in the product description block
self._buffer.jump_to(start, self.prod_desc2.pdb_size)
# Read the actual raster
blob = self._buffer.read(self.prod_desc.num_records * self.prod_desc.record_len)
self.data = np.array(blob).reshape((self.prod_desc.num_records,
self.prod_desc.record_len))
# Check for end marker
end = self._buffer.read(self.prod_desc.record_len)
if end != b''.join(repeat(b'\xff\x00', self.prod_desc.record_len // 2)):
log.warning('End marker not as expected: %s', end)
# Check to ensure that we processed all of the data
if not self._buffer.at_end():
log.warning('Leftover unprocessed data beyond EOF marker: %s',
self._buffer.get_next(10))
def to_dataset(self):
"""Convert to a CDM dataset.
Gives a representation of the data in a much more user-friendly manner, providing
easy access to Variables and relevant attributes.
Returns
-------
Dataset
"""
ds = Dataset()
# Put in time
ds.createDimension('time', 1)
time_var = ds.createVariable('time', np.int32, dimensions=('time',))
base_time = self.prod_desc.datetime.replace(hour=0, minute=0, second=0, microsecond=0)
time_var.units = 'milliseconds since ' + base_time.isoformat()
offset = (self.prod_desc.datetime - base_time)
time_var[:] = offset.seconds * 1000 + offset.microseconds / 1000.
# Set up projection
if self.prod_desc.projection == GiniProjection.lambert_conformal:
proj_var = ds.createVariable('Lambert_Conformal', np.int32)
proj_var.grid_mapping_name = 'lambert_conformal_conic'
proj_var.standard_parallel = self.prod_desc2.lat_in
proj_var.longitude_of_central_meridian = self.proj_info.lov
proj_var.latitude_of_projection_origin = self.prod_desc2.lat_in
proj_var.earth_radius = 6371200.0
_add_projection_coords(ds, self.prod_desc, proj_var, self.proj_info.dx,
self.proj_info.dy)
elif self.prod_desc.projection == GiniProjection.polar_stereographic:
proj_var = ds.createVariable('Polar_Stereographic', np.int32)
proj_var.grid_mapping_name = 'polar_stereographic'
proj_var.longitude_of_projection_origin = self.proj_info.lov
proj_var.latitude_of_projection_origin = -90 if self.proj_info.proj_center else 90
proj_var.earth_radius = 6371200.0
_add_projection_coords(ds, self.prod_desc, proj_var, self.proj_info.dx,
self.proj_info.dy)
elif self.prod_desc.projection == GiniProjection.mercator:
proj_var = ds.createVariable('Mercator', np.int32)
proj_var.grid_mapping_name = 'mercator'
proj_var.longitude_of_projection_origin = self.prod_desc.lo1
proj_var.latitude_of_projection_origin = self.prod_desc.la1
proj_var.standard_parallel = self.prod_desc2.lat_in
proj_var.earth_radius = 6371200.0
_add_projection_coords(ds, self.prod_desc, proj_var, self.prod_desc2.resolution,
self.prod_desc2.resolution)
else:
raise NotImplementedError('Need to add more projections to dataset!')
# Now the data
name = self.prod_desc.channel
if '(' in name:
name = name.split('(')[0].rstrip()
data_var = ds.createVariable(name, self.data.dtype, ('y', 'x'),
wrap_array=np.ma.array(self.data,
mask=self.data == self.missing))
data_var.long_name = self.prod_desc.channel
data_var.missing_value = self.missing
data_var.coordinates = "y x"
data_var.grid_mapping = proj_var.name
# Add a bit more metadata
ds.satellite = self.prod_desc.creating_entity
ds.sector = self.prod_desc.sector_id
return ds
def _process_wmo_header(self):
'Read off the WMO header from the file, if necessary.'
data = self._buffer.get_next(64).decode('utf-8', 'ignore')
match = self.wmo_finder.search(data)
if match:
self.wmo_code = match.groups()[0]
self.siteID = match.groups()[-1]
self._buffer.skip(match.end())
def __str__(self):
parts = [self.__class__.__name__ + ': {0.creating_entity} {0.sector_id} {0.channel}',
'Time: {0.datetime}', 'Size: {0.ny}x{0.nx}',
'Projection: {0.projection.name}',
'Lower Left Corner (Lon, Lat): ({0.lo1}, {0.la1})',
'Resolution: {1.resolution}km']
return '\n\t'.join(parts).format(self.prod_desc, self.prod_desc2)
def _add_projection_coords(ds, prod_desc, proj_var, dx, dy):
'Helper function for adding coordinate variables (projection and lon/lat) to a dataset'
proj = cf_to_proj(proj_var)
# Get projected location of lower left point
x0, y0 = proj(prod_desc.lo1, prod_desc.la1)
# Coordinate variable for x
ds.createDimension('x', prod_desc.nx)
x_var = ds.createVariable('x', np.float64, dimensions=('x',))
x_var.units = 'm'
x_var.long_name = 'x coordinate of projection'
x_var.standard_name = 'projection_x_coordinate'
x_var[:] = x0 + np.arange(prod_desc.nx) * (1000. * dx)
# Now y
ds.createDimension('y', prod_desc.ny)
y_var = ds.createVariable('y', np.float64, dimensions=('y',))
y_var.units = 'm'
y_var.long_name = 'y coordinate of projection'
y_var.standard_name = 'projection_y_coordinate'
y_var[:] = y0 + np.arange(prod_desc.ny) * (1000. * dy)
# Get the two-D lon,lat grid as well
x, y = np.meshgrid(x_var[:], y_var[:])
lon, lat = proj(x, y, inverse=True)
lon_var = ds.createVariable('lon', np.float64, dimensions=('y', 'x'), wrap_array=lon)
lon_var.long_name = 'longitude'
lon_var.units = 'degrees_east'
lat_var = ds.createVariable('lat', np.float64, dimensions=('y', 'x'), wrap_array=lat)
lat_var.long_name = 'latitude'
lat_var.units = 'degrees_north'
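# A minimal usage sketch (hypothetical filename): decode a GINI file and convert
# it to a CDM dataset for friendlier access to the image and projection data.
#
#   f = GiniFile('WEST-CONUS_4km_WV_20151208_2200.gini')
#   print(f)             # satellite, sector, channel, size, and projection summary
#   ds = f.to_dataset()  # Dataset with time/x/y coordinates, lon/lat, and the raster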
|
|
# -*- coding: utf-8 -*-
import json
import random
import logging as l
import os
import scipy.sparse as ss
import scipy.io as sio
import pandas as pd
import numpy as np
from sklearn.cross_validation import StratifiedShuffleSplit as SSS
from sklearn.cross_validation import KFold
from sklearn.externals.joblib import Parallel, delayed
import ume
import ume.cross_validation
import ume.externals.jsonnet
from ume.utils import dynamic_load
from ume.metrics import multi_logloss
def hstack_mat(X, mat_fn, mat_name, conf=None):
if mat_fn.endswith('.mat'):
X_add = sio.loadmat(mat_fn)[mat_name]
X_add = ss.csr_matrix(X_add)
elif mat_fn.endswith('.npz'):
X_add = np.load(mat_fn)[mat_name]
else:
raise RuntimeError("unsupported file")
# slicing
if conf is not None and 'slice' in conf:
slice_start, slice_end = conf['slice']
slice_start, slice_end = int(slice_start), int(slice_end)
X_add = X_add[:, slice_start:slice_end]
# horizontal stack
if X is None:
return X_add
else:
if isinstance(X, np.ndarray) and isinstance(X_add, np.ndarray):
return np.hstack([X, X_add])
elif isinstance(X, ss.csr_matrix) or isinstance(X, ss.csc_matrix):
return ss.csr_matrix(
ss.hstack([X, ss.csr_matrix(X_add)])
)
else:
raise RuntimeError("Unsupported datatype")
def make_X_from_features(conf):
"""
Make X from features in a model description.
"""
X = None
for mat_info in conf['features']:
# dict format
if isinstance(mat_info, dict):
mat_fn = mat_info['file']
mat_name = mat_info['name']
X = hstack_mat(X, mat_fn, mat_name, conf=mat_info)
# string format
elif isinstance(mat_info, str) or isinstance(mat_info, unicode):
X = hstack_mat(X, mat_info, 'X', conf=None)
else:
raise RuntimeError("Unsupported feature type: {0}".format(mat_info))
if X is None:
raise RuntimeError("Feature data is required")
return X
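# The 'features' list accepts both forms handled above (hypothetical paths):
#
#   conf = {'features': [
#       'data/working/tfidf.mat',  # string form; matrix name defaults to 'X'
#       {'file': 'data/working/svd.npz', 'name': 'X', 'slice': [0, 100]},
#   ]}
#   X = make_X_from_features(conf)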
def load_array(conf, name_path):
"""
Load array from working data
```
>> train_ids = load_array(self._conf, 'task.dataset.id_train')
```
"""
arr_ = dict(conf)
for name in name_path.split('.'):
arr_ = arr_[name]
if isinstance(arr_, dict):
mat_fn = arr_['file']
mat_name = arr_['name']
return hstack_mat(None, mat_fn, mat_name)
elif isinstance(arr_, str):
return hstack_mat(None, arr_, 'X')
else:
raise RuntimeError("Unsupported feature type: {0}".format(mat_info))
def _to_str_value(param_dict):
# Primitive values
if isinstance(param_dict, int):
return param_dict
elif isinstance(param_dict, str):
return param_dict
elif isinstance(param_dict, float):
return param_dict
converted_param_dict = {}
for k, v in param_dict.items():
if isinstance(v, int):
converted_param_dict[k] = v
elif isinstance(v, str):
converted_param_dict[k] = v
elif isinstance(v, float):
converted_param_dict[k] = v
elif isinstance(v, list):
# convert recursively
converted_param_dict[k] = [
_to_str_value(elem)
for elem in v
]
elif isinstance(v, dict):
# convert recursively
converted_param_dict[k] = _to_str_value(v)
else:
# handle unicode for py27
converted_param_dict[k] = str(v)
return converted_param_dict
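# For example, values loaded from jsonnet may arrive as unicode on Python 2;
# this normalises them (recursively) before they are passed to the model:
#
#   _to_str_value({'eta': 0.1, 'objective': u'multi:softprob', 'seed': 777})
#   # -> {'eta': 0.1, 'objective': 'multi:softprob', 'seed': 777}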
class TaskSpec(object):
def __init__(self, jn):
self._conf = self.__load_conf(jn)
self._jn = jn
def __load_conf(self, jn):
json_dic = ume.externals.jsonnet.load(jn)
if "ERROR" in json_dic:
raise RuntimeError(json_dic)
return json_dic
def _load_model(self):
model_klass = dynamic_load(self._conf['model']['class'])
model_param = _to_str_value(self._conf['model'].get('params', {}))
clf = model_klass(**model_param)
return clf
def solve(self, X_train, y_train, X_test):
raise NotImplementedError("Need to implement `solve`.")
def _create_submission(self, output_fn):
raise NotImplementedError("Need to implement `_create_submission`.")
def _post_processing(self, output_fn):
if 'task' not in self._conf: return
if 'params' not in self._conf['task']: return
if 'postprocessing' not in self._conf['task']['params']: return
method = dynamic_load(self._conf['task']['params']['postprocessing'])
method(output_fn)
def _to_output_fn(self, model_fn):
output_fn = model_fn.replace(
'data/input/model/',
'data/output/')
output_fn = output_fn + '.csv'
return output_fn
def create_submission(self, model_fn):
"""
Called by `ume predict` to create a submission file for the specified task.
"""
output_fn = self._to_output_fn(model_fn)
self._create_submission(output_fn)
self._post_processing(output_fn)
def validate(self):
"""
Called by `ume validation`
"""
X_orig = make_X_from_features(self._conf)
train_sz = len(load_array(self._conf, 'task.dataset.id_train'))
X = X_orig[:train_sz, :]
y = load_array(self._conf, 'task.dataset.y_train')
y = y.reshape(y.size)
cv_method_name = self._conf['task']['params']['validation']['class']
cv_params_name = self._conf['task']['params']['validation'].get(
'params', {})
cv_params_name = _to_str_value(cv_params_name)
cv_method = dynamic_load(cv_method_name)
mean_cv_score = cv_method(X, y, self, **cv_params_name)
task_metrics = self._conf['task']['params']['metrics']
task_method = task_metrics['method']
ume.db.add_validation_score(
os.path.basename(self._jn),
ume.__version__,
task_method,
mean_cv_score)
class MultiClassPredictProba(TaskSpec):
def __init__(self, jn):
self.required = ['features', 'model', 'task']
# Load jsonnet config
TaskSpec.__init__(self, jn)
# Check fields
for field in self.required:
if field not in self._conf.keys():
raise RuntimeError("Required field: {0}".format(field))
def solve(self, X_train, y_train, X_test):
clf = self._load_model()
l.info("Clf: {0}, X: {1}".format(str(clf), str(X_train.shape)))
clf.fit(X_train, y_train)
preds = clf.predict_proba(X_test)
del clf
return preds
def _create_submission(self, output_fn):
X_orig = make_X_from_features(self._conf)
train_ids = load_array(self._conf, 'task.dataset.id_train')
test_ids = load_array(self._conf, 'task.dataset.id_test')
train_sz = len(train_ids)
test_sz = len(test_ids)
X_train = X_orig[np.array(range(train_sz)), :]
X_test = X_orig[np.array(range(train_sz, train_sz + test_sz)), :]
y = load_array(self._conf, 'task.dataset.y_train')
y = y.reshape(y.size)
y_pred = self.solve(X_train, y, X_test)
df = pd.DataFrame(y_pred, columns=[
'Class_{0}'.format(i + 1)
for i in range(y_pred.shape[1])])
df['Id'] = test_ids.reshape(len(test_ids)).tolist()
df.set_index('Id').to_csv(output_fn)
class DebugMultiClassPredictProba(TaskSpec):
def __init__(self, jn):
self.required = ['features', 'model', 'task']
# Load jsonnet config
TaskSpec.__init__(self, jn)
# Check fields
for field in self.required:
if field not in self._conf.keys():
raise RuntimeError("Required field: {0}".format(field))
def solve(self, X_train, y_train, X_test, y_test):
clf = self._load_model()
clf.fit(X_train, y_train)
clf._set_test_label(y_test)
preds = clf.predict_proba(X_test)
del clf
return preds
def validate(self):
X_orig = make_X_from_features(self._conf)
train_sz = len(load_array(self._conf, 'task.dataset.id_train'))
X = X_orig[np.array(range(train_sz)), :]
y = load_array(self._conf, 'task.dataset.y_train')
y = y.reshape(y.size)
task_metrics = self._conf['task']['params']['metrics']
if isinstance(task_metrics, str):
task_method = task_metrics
elif isinstance(task_metrics, dict):
task_method = task_metrics['method']
else:
raise RuntimeError("invalid task metrics")
metrics = dynamic_load(task_method)
cv_scores = []
kf = KFold(X.shape[0], n_folds=10, shuffle=True, random_state=777)
for kth, (train_idx, test_idx) in enumerate(kf):
X_train, X_test = X[train_idx], X[test_idx]
y_train, y_test = y[train_idx], y[test_idx]
y_pred = self.solve(X_train, y_train, X_test, y_test)
score = metrics(y_test, y_pred)
l.info("KFold: ({0}) {1:.4f}".format(kth, score))
cv_scores.append(score)
mean_cv_score = np.mean(cv_scores)
l.info("CV Score: {0:.4f} (var: {1:.6f})".format(
mean_cv_score,
np.var(cv_scores)))
ume.db.add_validation_score(
os.path.basename(self._jn),
ume.__version__,
task_method,
mean_cv_score)
def _create_submission(self, output_fn):
X_orig = make_X_from_features(self._conf)
train_ids = load_array(self._conf, 'task.dataset.id_train')
test_ids = load_array(self._conf, 'task.dataset.id_test')
train_sz = len(train_ids)
test_sz = len(test_ids)
X_train = X_orig[np.array(range(train_sz)), :]
X_test = X_orig[np.array(range(train_sz, train_sz + test_sz)), :]
y = load_array(self._conf, 'task.dataset.y_train')
y = y.reshape(y.size)
y_pred = self.solve(X_train, y, X_test)
df = pd.DataFrame(y_pred, columns=[
'Class_{0}'.format(i + 1)
for i in range(y_pred.shape[1])])
df['Id'] = test_ids.reshape(len(test_ids)).tolist()
df.set_index('Id').to_csv(output_fn)
class BinaryClassPredictProba(TaskSpec):
def __init__(self, jn):
self.required = ['features', 'model', 'task']
# Load jsonnet config
TaskSpec.__init__(self, jn)
# Check fields
for field in self.required:
if field not in self._conf.keys():
raise RuntimeError("Required field: {0}".format(field))
def solve(self, X_train, y_train, X_test):
clf = self._load_model()
l.info("Clf: {0}, X: {1}".format(str(clf), str(X_train.shape)))
clf.fit(X_train, y_train)
preds = clf.predict_proba(X_test)
#try:
# preds = clf.predict_proba(X_test)
#except:
# preds = clf.decision_function(X_test)
if len(preds.shape) > 1 and preds.shape[1] == 2:
preds = preds[:, 1]
del clf
return preds
def _create_submission(self, output_fn):
X_orig = make_X_from_features(self._conf)
train_ids = load_array(self._conf, 'task.dataset.id_train')
test_ids = load_array(self._conf, 'task.dataset.id_test')
train_sz = len(train_ids)
test_sz = len(test_ids)
X_train = X_orig[np.array(range(train_sz)), :]
X_test = X_orig[np.array(range(train_sz, train_sz + test_sz)), :]
y = load_array(self._conf, 'task.dataset.y_train')
y = y.reshape(y.size)
y_pred = self.solve(X_train, y, X_test)
df = pd.DataFrame(y_pred, columns=['Proba'])
df['Id'] = test_ids.reshape(len(test_ids)).tolist()
df.set_index('Id').to_csv(output_fn)
class BinaryClassPredict(TaskSpec):
def __init__(self, jn):
self.required = ['features', 'model', 'task']
# Load jsonnet config
TaskSpec.__init__(self, jn)
# Check fields
for field in self.required:
if field not in self._conf.keys():
raise RuntimeError("Required field: {0}".format(field))
def solve(self, X_train, y_train, X_test):
clf = self._load_model()
l.info("Clf: {0}, X: {1}".format(str(clf), str(X_train.shape)))
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
#try:
# preds = clf.predict_proba(X_test)
#except:
# preds = clf.decision_function(X_test)
if len(preds.shape) > 1 and preds.shape[1] == 2:
preds = preds[:, 1]
del clf
return preds
def _create_submission(self, output_fn):
X_orig = make_X_from_features(self._conf)
train_ids = load_array(self._conf, 'task.dataset.id_train')
test_ids = load_array(self._conf, 'task.dataset.id_test')
train_sz = len(train_ids)
test_sz = len(test_ids)
X_train = X_orig[np.array(range(train_sz)), :]
X_test = X_orig[np.array(range(train_sz, train_sz + test_sz)), :]
y = load_array(self._conf, 'task.dataset.y_train')
y = y.reshape(y.size)
y_pred = self.solve(X_train, y, X_test)
df = pd.DataFrame(y_pred, columns=['Proba'])
df['Id'] = test_ids.reshape(len(test_ids)).tolist()
df.set_index('Id').to_csv(output_fn)
class Regression(TaskSpec):
def __init__(self, jn):
self.required = ['features', 'model', 'task']
# Load jsonnet config
TaskSpec.__init__(self, jn)
# Check fields
for field in self.required:
if field not in self._conf.keys():
raise RuntimeError("Required field: {0}".format(field))
def solve(self, X_train, y_train, X_test):
clf = self._load_model()
l.info("Clf: {0}, X: {1}".format(str(clf), str(X_train.shape)))
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
del clf
return preds
def _create_submission(self, output_fn):
X_orig = make_X_from_features(self._conf)
train_ids = load_array(self._conf, 'task.dataset.id_train')
test_ids = load_array(self._conf, 'task.dataset.id_test')
train_sz = len(train_ids)
test_sz = len(test_ids)
X_train = X_orig[np.array(range(train_sz)), :]
X_test = X_orig[np.array(range(train_sz, train_sz + test_sz)), :]
y = load_array(self._conf, 'task.dataset.y_train')
y = y.reshape(y.size)
y_pred = self.solve(X_train, y, X_test)
df = pd.DataFrame(y_pred, columns=['Prediction'])
df['Id'] = test_ids.reshape(len(test_ids)).tolist()
df.set_index('Id').to_csv(output_fn)
def main():
task = MultiClassPredictProba("data/input/model/xgb.jn")
task.validate()
task.create_submission("data/input/model/xgb.jn")  # create_submission() requires the model file path
if __name__ == '__main__':
l.basicConfig(format=u'[%(asctime)s] %(message)s', level=l.INFO)
random.seed(777)
main()
|
|
import matplotlib
from kid_readout.measurement.legacy import sweeps
from kid_readout.roach import baseband
matplotlib.use('agg')
import numpy as np
import time
import sys
from kid_readout.utils import data_file
from kid_readout.analysis.resonator.legacy_resonator import fit_best_resonator
from kid_readout.equipment import hittite_controller
from kid_readout.equipment import lockin_controller
from kid_readout.equipment.agilent_33220 import FunctionGenerator
fg = FunctionGenerator()
hittite = hittite_controller.hittiteController()
lockin = lockin_controller.lockinController()
print lockin.get_idn()
ri = baseband.RoachBaseband()
ri.initialize()
f0s = np.load('/home/gjones/kid_readout/apps/sc5x4_0813f12.npy')
f0s.sort()
f0s = f0s[[0,1,2,3,4,5,6,7,8,9,10,13,14,15,16,17]] # remove close packed resonators to enable reading out all simultaneously
suffix = "mmw"
mmw_source_modulation_freq = 25.0
mmw_atten_turns = (7.0,7.0)
nf = len(f0s)
atonce = 16
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ",atonce
f0s = np.concatenate((f0s,np.arange(1,1+atonce-(nf%atonce))+f0s.max()))
nsamp = 2**18
step = 1
nstep = 80
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.arange(-(nstep+1),(nstep+1))*step
offsets = offset_bins*512.0/nsamp
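# Pad the sweep with one extra point 20e-3 beyond each end of the offset grid
# (20 kHz if f0s are in MHz), presumably to sample the off-resonance baseline.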
offsets = np.concatenate(([offsets.min()-20e-3,],offsets,[offsets.max()+20e-3]))
print f0s
print offsets*1e6
print len(f0s)
mmw_freqs = np.linspace(135e9,165e9,2000)
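# The Hittite synthesizer apparently drives a x12 multiplier chain, so it is
# programmed at one twelfth of the desired mm-wave frequency
# (see hittite.set_freq(freq/12.0) below).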
fundamental_freqs = mmw_freqs/12.0
fg.set_dc_voltage(0.0)
if False:
from kid_readout.equipment.parse_srs import get_all_temperature_data
while True:
temp = get_all_temperature_data()[1][-1]
print "mk stage at", temp
if temp > 0.348:
break
time.sleep(300)
time.sleep(600)
start = time.time()
use_fmin = False
attenlist = [39]
for atten in attenlist:
hittite.off()
print "setting attenuator to",atten
ri.set_dac_attenuator(atten)
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
print "loaded waveforms in", (time.time()-start),"seconds"
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=4)
orig_sweep_data = sweep_data
meas_cfs = []
idxs = []
delays = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
res = fit_best_resonator(fr[1:-1],s21[1:-1],errors=errors[1:-1]) #Resonator(fr,s21,errors=errors)
delay = res.delay
delays.append(delay)
s21 = s21*np.exp(2j*np.pi*res.delay*fr)
res = fit_best_resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0, "delay",delay,"resid delay",res.delay
if use_fmin:
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
delay = np.median(delays)
print "median delay is ",delay
nsamp = 2**22
step = 1
offset_bins = np.array([-8,-4,-2,-1,0,1,2,4])
offset_bins = np.concatenate(([-40,-20],offset_bins,[20,40]))
offsets = offset_bins*512.0/nsamp
meas_cfs = np.array(meas_cfs)
f0binned_meas = np.round(meas_cfs*nsamp/512.0)*512.0/nsamp
f0s = f0binned_meas
measured_freqs = sweeps.prepare_sweep(ri,f0binned_meas,offsets,nsamp=nsamp)
print "loaded updated waveforms in", (time.time()-start),"seconds"
sys.stdout.flush()
time.sleep(1)
df = data_file.DataFile(suffix=suffix)
df.nc.mmw_atten_turns=mmw_atten_turns
df.log_hw_state(ri)
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=4, sweep_data=orig_sweep_data)
df.add_sweep(sweep_data)
meas_cfs = []
idxs = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
s21 = s21*np.exp(2j*np.pi*delay*fr)
res = fit_best_resonator(fr,s21,errors=errors) #Resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0
if use_fmin:
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
print meas_cfs
ri.add_tone_freqs(np.array(meas_cfs))
ri.select_bank(ri.tone_bins.shape[0]-1)
ri._sync()
time.sleep(0.5)
#raw_input("turn on LED take data")
hittite.on()
df.log_hw_state(ri)
nsets = len(meas_cfs)/atonce
tsg = None
for iset in range(nsets):
selection = range(len(meas_cfs))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.4)
t0 = time.time()
for freq in mmw_freqs:
hittite.set_freq(freq/12.0)
time.sleep(0.1)
dmod,addr = ri.get_data_seconds(2)
x,y,r,theta = lockin.get_data()
print freq,#nsets,iset,tsg
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg, mmw_source_freq=freq,
mmw_source_modulation_freq=mmw_source_modulation_freq,
zbd_voltage=x)
df.sync()
print "done with sweep"
df.close()
7/0  # deliberate ZeroDivisionError to stop the script here; the blocks below are not reached
fg.set_dc_voltage(0.25)
tstart = time.time()
while time.time() - tstart < 1800:
df.log_hw_state(ri)
nsets = len(meas_cfs)/atonce
tsg = None
for iset in range(nsets):
selection = range(len(meas_cfs))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.4)
t0 = time.time()
for freq in mmw_freqs:
hittite.set_freq(freq/12.0)
time.sleep(0.1)
dmod,addr = ri.get_data_seconds(2)
x,y,r,theta = lockin.get_data()
print freq,#nsets,iset,tsg
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg, mmw_source_freq=freq,
mmw_source_modulation_freq=mmw_source_modulation_freq,
zbd_voltage=x)
df.sync()
time.sleep(300)
df.nc.close()
nsamp = 2**18
step = 1
nstep = 80
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.arange(-(nstep+1),(nstep+1))*step
offsets = offset_bins*512.0/nsamp
offsets = np.concatenate(([offsets.min()-20e-3,],offsets,[offsets.max()+20e-3]))
for atten in attenlist:
hittite.off()
print "setting attenuator to",atten
ri.set_dac_attenuator(atten)
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
print "loaded waveforms in", (time.time()-start),"seconds"
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=4)
orig_sweep_data = sweep_data
meas_cfs = []
idxs = []
delays = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
res = fit_best_resonator(fr[1:-1],s21[1:-1],errors=errors[1:-1]) #Resonator(fr,s21,errors=errors)
delay = res.delay
delays.append(delay)
s21 = s21*np.exp(2j*np.pi*res.delay*fr)
res = fit_best_resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0, "delay",delay,"resid delay",res.delay
if use_fmin:
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
delay = np.median(delays)
print "median delay is ",delay
nsamp = 2**22
step = 1
offset_bins = np.array([-8,-4,-2,-1,0,1,2,4])
offset_bins = np.concatenate(([-40,-20],offset_bins,[20,40]))
offsets = offset_bins*512.0/nsamp
meas_cfs = np.array(meas_cfs)
f0binned_meas = np.round(meas_cfs*nsamp/512.0)*512.0/nsamp
f0s = f0binned_meas
measured_freqs = sweeps.prepare_sweep(ri,f0binned_meas,offsets,nsamp=nsamp)
print "loaded updated waveforms in", (time.time()-start),"seconds"
sys.stdout.flush()
time.sleep(1)
df = data_file.DataFile(suffix=suffix)
df.nc.mmw_atten_turns=mmw_atten_turns
df.log_hw_state(ri)
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=4, sweep_data=orig_sweep_data)
df.add_sweep(sweep_data)
meas_cfs = []
idxs = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
s21 = s21*np.exp(2j*np.pi*delay*fr)
res = fit_best_resonator(fr,s21,errors=errors) #Resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0
if use_fmin:
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
print meas_cfs
ri.add_tone_freqs(np.array(meas_cfs))
ri.select_bank(ri.tone_bins.shape[0]-1)
ri._sync()
time.sleep(0.5)
#raw_input("turn on LED take data")
hittite.on()
df.log_hw_state(ri)
nsets = len(meas_cfs)/atonce
tsg = None
for iset in range(nsets):
selection = range(len(meas_cfs))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.4)
t0 = time.time()
for freq in mmw_freqs:
hittite.set_freq(freq/12.0)
time.sleep(0.1)
dmod,addr = ri.get_data_seconds(2)
x,y,r,theta = lockin.get_data()
print freq,#nsets,iset,tsg
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg, mmw_source_freq=freq,
mmw_source_modulation_freq=mmw_source_modulation_freq,
zbd_voltage=x)
df.sync()
print "done with sweep"
df.nc.close()
print "completed in",((time.time()-start)/60.0),"minutes"
|
|
#!python
# Implementation of Clustering Algorithms in POVME
# By Celia Wong
# Advised by Jeff Wagner
# Amaro Lab, UCSD
import scipy.cluster.vq, scipy.cluster.hierarchy
import argparse
import numpy
import sys
import os
import csv
import copy
import itertools
import collections
#import fnmatch
import pylab
import POVME.packages.binana.peel as peel
#import matplotlib.pyplot
class InputReader():
def __init__(self):
#self.coordinates = []
#self.frames = 0
self.overlapMatrix = []
self.prefixToTrajectory = {}
self.indexToNpyFile = {}
self.indexToFrame = {}
self.indexToPrefix = {}
# Save each frame in the trajectory as a set
''' def read_traj(self,traj_file):
trajectory = open(traj_file,'r')
frame_coordinates = []
# temp_coordinates = numoy.array([])
for line in trajectory:
if line[0] == 'E':
self.coordinates.append(frame_coordinates)
# numpy.append(self.coordinates,frame_coordinates)
frame_coordinates = []
self.frames += 1
elif line[0] == 'A':
#output_file_name = 'hierarchical_' + command_input['output_name'] + '.csv'
if line[17] != 'X':
frame_coordinates.append((float(line[29:37].strip()),float(line[38:45].strip()),float(line[46:54].strip())))
#numpy.append(frame_coordinates,(float(line[29:37].strip()),float(line[38:45].strip()),float(line[46:54].strip())))
self.coordinates = numpy.array(self.coordinates)
trajectory.close()
'''
def read_indexFile(self,indexToFrameFile):
if indexToFrameFile == None:
return
#self.indexToNpyFile = {}
#self.indexToFrame = {}
with open(indexToFrameFile) as csvfile:
fieldnames = ['index','frameFile']
reader = csv.DictReader(csvfile, fieldnames=fieldnames)
for row in reader:
self.indexToNpyFile[int(row['index'])] = row['frameFile']
if self.indexToFrame is not None:
try:
frameNumber = row['frameFile'].split('frame_')[-1].replace('.npy','')
self.indexToFrame[int(row['index'])] = int(frameNumber)
framePrefix = row['frameFile'].split('/')[-1].replace('frame_%s.npy'%(frameNumber),'')
self.indexToPrefix[int(row['index'])] = framePrefix
except:
raise Exception("Unable to strip frame number or prefix from input filename %s. Disabling frame number output." %(row['frameFile']))
#self.indexToFrame = None
#self.indexToPrefix = None
#self.coordinates.append(numpy.load(row['frame']))
def read_overlap(self,overlap_file):
overlap_suffix = overlap_file[-4:]
if overlap_suffix == '.npy':
self.overlapMatrix = numpy.load(open(overlap_file))
elif overlap_suffix == '.csv':
overlap = open(overlap_file,'r')
overlap_values = csv.reader(overlap, delimiter = ',')
for line in overlap_values:
#self.overlapMatrix.append([float(x) for x in line])
row = []
#self.frames += 1
for value in line:
row.append(float(value))
self.overlapMatrix.append(row)
self.overlapMatrix = numpy.array(self.overlapMatrix)
#self.oneMinusOverlapMatrix = 1. - numpy.array(self.overlapMatrix)
overlap.close()
else:
raise Exception('Unrecognized overlap matrix input file type:', overlap_suffix)
def parse_traj_inputs(self, argst, argsT):
if argsT != None:
for argT in argsT:
data = open(argT).read()
datasp = data.strip().split()
for line in datasp:
linesp = line.split(':')
prefix = linesp[0].strip()
trajFile = linesp[1].strip()
self.prefixToTrajectory[prefix] = trajFile
for argt in argst:
argtsp = argt.split(':')
prefix = argtsp[0].strip()
trajFile = argtsp[1].strip()
self.prefixToTrajectory[prefix] = trajFile
## Check to ensure these files exist
for prefix in self.prefixToTrajectory:
trajFileName = self.prefixToTrajectory[prefix]
if not(os.path.exists(trajFileName)):
raise Exception('ERROR - trajectory file %s doesn\'t exist!' %(trajFileName))
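# A minimal workflow sketch (hypothetical file names): load the overlap matrix
# and the index-to-frame map, cluster, and extract representative frames using
# the Cluster class defined below.
#
#   reader = InputReader()
#   reader.read_overlap('overlap_matrix.npy')
#   reader.read_indexFile('indexMapToFrames.csv')
#   reader.parse_traj_inputs(['prefix1:traj1.pdb'], None)
#   clusterer = Cluster(reader)
#   clusters = clusterer.hierarchical_cluster(5)
#   clusterer.find_centroids(clusters, '')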
class Cluster():
#def __init__(self,coordinates,frames,overlap_values,frameToFileName):
def __init__(self,input_reader):
#self.coordinates = coordinates
#self.frames = frames
self.overlap_values = input_reader.overlapMatrix
self.one_minus_overlap_values = 1. - self.overlap_values
self.frames = len(self.overlap_values)
self.indexToFrame = input_reader.indexToFrame
self.indexToPrefix = input_reader.indexToPrefix
self.indexToNpyFile = input_reader.indexToNpyFile
self.prefixToTrajectory = input_reader.prefixToTrajectory
self.avgLinkage = None
self.whited_overlap_values = None
self.do_sanity_checks()
def do_sanity_checks(self):
self.check_commandline_inputs()
self.ensure_file_prefixes_map_to_trajectories()
#self.ensure_trajectories_exist() #Check performed during -T argument parsing instead
def check_commandline_inputs(self):
if (self.indexToNpyFile == {}) and (self.prefixToTrajectory != {}):
raise Exception("ERROR! Given pdb trajectory (-t/T) but not given index file (-i). Output will return matrix indices instead of frame numbers or cluster representative structures.")
elif self.indexToNpyFile == {}:
print "Not given index file (-i). Clustering will return matrix indices, but not trajectory frame numbers or members/representatives."
elif (self.indexToNpyFile != {}) and(self.prefixToTrajectory == {}):
print "Given index file (-i) but not prefix-to-trajectory mapping (-t or -T). Clustering will return prefix and frame numbers of cluster members, but will not extract representatives."
elif (self.indexToNpyFile != {}) and (self.prefixToTrajectory != {}):
print "Given index file (-i) and prefix-to-trajectory mapping (-t or -T). Clustering will return prefix and frame numbers of cluster members, and will extract representatives."
def ensure_file_prefixes_map_to_trajectories(self):
if (self.prefixToTrajectory == {}) or (self.indexToNpyFile == {}):
print "No -i and/or -t/T arguments given - Skipping file-prefix-to-trajectory mapping completeness test"
return
else:
allPrefixesSet = set(self.indexToPrefix.values())
for prefix in allPrefixesSet:
if not(prefix in self.prefixToTrajectory.keys()):
raise Exception('File prefix %s not found in -t arguments (which are %r)' %(prefix, self.prefixToTrajectory.keys()))
return
def ensure_trajectories_exist(self):
if self.prefixToTrajectory == {}:
print "No -t/T arguments given. Skipping trajectory-file-existence check"
else:
for trajectoryFile in self.prefixToTrajectory.values():
if not(os.path.exists(trajectoryFile)):
raise Exception("Trajectory file %s not found" %(trajectoryFile))
def kmeans_cluster(self,number_clusters):
if self.whited_overlap_values is None:  # 'is None' avoids elementwise comparison once this holds a numpy array
self.whited_overlap_values = scipy.cluster.vq.whiten(self.overlap_values)
frames,result = scipy.cluster.vq.kmeans(self.whited_overlap_values, number_clusters)
#frames,result = scipy.cluster.vq.kmeans2(self.whited_overlap_values, number_clusters)
code, dist = scipy.cluster.vq.vq(self.whited_overlap_values,frames)
print "The clusters are {0}".format(code)
print code.shape
list_of_clusters = self.separate_clusters(code)
#return code
return list_of_clusters
def hierarchical_cluster(self, number_clusters):
if self.avgLinkage is None:  # 'is None' avoids elementwise comparison once this holds a numpy array
try:
overlapHash = str(numpy.sum(self.one_minus_overlap_values.flatten()[::self.one_minus_overlap_values.size/100]))[-7:]
except:
overlapHash = str(numpy.sum(self.one_minus_overlap_values.flatten()))[-7:]
linkageFile = 'avg_linkage_hash_%s.npy' %(overlapHash)
if os.path.exists(linkageFile):
self.avgLinkage = numpy.load(linkageFile)
else:
self.avgLinkage = scipy.cluster.hierarchy.average(self.one_minus_overlap_values)
numpy.save(linkageFile, self.avgLinkage)
result = scipy.cluster.hierarchy.fcluster(self.avgLinkage,
number_clusters,
criterion='maxclust')
#result = scipy.cluster.hierarchy.linkage(self.overlap_values)
# scipy's hierarchical clustering returns cluster labels numbered from 1 instead of 0; shift them to be 0-based.
result = result - 1
clusters = self.separate_clusters(result)
return clusters
# scipy.cluster.hierarchy.dendrogram(result)
''' separate_cluster_traj will separate the original trajectory into the
number of clusters specified. Each new cluster traj will only contain the
frames that belong in that cluster.
cluster_result = list with one entry per frame giving the cluster
that frame is assigned to.
number_clusters = the number of clusters specified.
file_name = the file name that was passed into main() as a command line arg.
traj_file = the original trajectory file containing all frames
'''
def separate_cluster_traj(self,cluster_result,number_clusters,file_name,traj_file,output_file):
list_writes = [None]*number_clusters
'''Open one output PDB per cluster (n = number of clusters previously indicated)'''
for i in range(number_clusters):
list_writes[i] = open('cluster_'+ str(i)+'.pdb','wb')
initial_pdb = open(traj_file,'rb')
current_frame = 0
current_cluster = cluster_result[current_frame]
for line in initial_pdb:
if line[0] == 'E':
list_writes[current_cluster].write(line)
if current_frame < len(cluster_result)-1:
current_frame += 1
current_cluster = cluster_result[current_frame]
else:
list_writes[current_cluster].write(line)
initial_pdb.close()
for i in range(number_clusters):
list_writes[i].close()
''' Separates the frames into the set clusters
'''
def separate_clusters(self,cluster_results):
# print "cluster results: {0}".format(cluster_results)
total_num_clusters = len(set(cluster_results))
list_clusters = [list([]) for i in xrange(total_num_clusters)]
for i in range(len(cluster_results)):
# print "Cluster_res for {0} is {1}".format(i, cluster_results[i])
list_clusters[cluster_results[i]].append(i)
list_clusters.sort(key=len, reverse=True)
return list_clusters
''' csv file containing differences in binding site is first argument already read and stored into memory by previous command '''
#def find_centroids(self,binding_volume_matrix,cluster_results,number_clusters,number_frames, indexToFrame):
def find_centroids(self, list_of_clusters, outputPrefix):
#number_clusters = len(set(cluster_results))
#list_of_clusters = self.separate_clusters(cluster_results)
#print list_of_clusters
''' set to some arbitrary large number? '''
#shortest_average_distance = [1.e20] * number_clusters
#centroid_list = [[0] for i in xrange(number_clusters)]
centroid_list = []
for cluster in list_of_clusters:
sum_distances = []
if len(cluster) == 1:
sum_distances.append(0)
else:
cluster = numpy.array(cluster)
for entry in cluster:
allButEntry = cluster[cluster != entry]
#print cluster, entry, allButEntry
#print self.one_minus_overlap_values[entry,:]
totalDist = numpy.sum(self.one_minus_overlap_values[entry,allButEntry])
sum_distances.append(totalDist)
#print cluster, sum_distances, numpy.argsort(sum_distances)[0]
centroid_cluster_index = numpy.argsort(sum_distances)[0]
centroid_global_index = cluster[centroid_cluster_index]
centroid_list.append(centroid_global_index)
if (self.indexToFrame == {}) and (self.indexToNpyFile != {}):
repsFileName = '%scluster_reps.csv' %(outputPrefix)
membersFileName = '%scluster_members.csv' %(outputPrefix)
print "Unable to extract frame numbers from file names. Writing out npy file names to %s and %s" %(repsFileName, membersFileName)
with open(repsFileName,'wb') as of:
cluster_rep_file_names = [str(self.indexToNpyFile[i]) for i in centroid_list]
of.write('\n'.join(cluster_rep_file_names))
with open(membersFileName,'wb') as of:
for cluster in list_of_clusters:
cluster_member_file_names =[str(self.indexToNpyFile[i]) for i in cluster]
of.write(' '.join(cluster_member_file_names))
of.write('\n')
elif (self.indexToFrame == {}) and (self.indexToNpyFile == {}):
print "No matrix-index-to-trajectory-frame mapping given. Writing out matrix indices"
with open('%scluster_reps.csv' %(outputPrefix),'wb') as of:
of.write('\n'.join([str(i) for i in centroid_list]))
with open('%scluster_members.csv' %(outputPrefix),'wb') as of:
for cluster in list_of_clusters:
of.write(' '.join([str(i) for i in cluster]))
of.write('\n')
elif (self.indexToFrame != {}):
repsFileName = '%scluster_reps.csv' %(outputPrefix)
membersFileName = '%scluster_members.csv' %(outputPrefix)
print "Matrix-index-to-trajectory-frame mapping given. Writing out trajectory frames to %s and %s." %(repsFileName, membersFileName)
with open(repsFileName,'wb') as of:
cluster_rep_frame_nums = [str(self.indexToFrame[i]) for i in centroid_list]
cluster_rep_prefixes = [str(self.indexToPrefix[i]) for i in centroid_list]
cluster_rep_strings = ['_'.join(i) for i in zip(cluster_rep_prefixes, cluster_rep_frame_nums)]
of.write('\n'.join(cluster_rep_strings))
with open(membersFileName,'wb') as of:
for cluster in list_of_clusters:
cluster_member_frame_nums =[str(self.indexToFrame[i]) for i in cluster]
cluster_member_prefixes = [str(self.indexToPrefix[i]) for i in cluster]
cluster_member_strings = ['_'.join(i) for i in zip(cluster_member_prefixes, cluster_member_frame_nums)]
of.write(' '.join(cluster_member_strings))
of.write('\n')
if (self.indexToFrame != {}) and (self.prefixToTrajectory != {}):
print "Extracting trajectory frames"
matrixIndex2Cluster = {}
for index, centroid in enumerate(centroid_list):
matrixIndex2Cluster[centroid] = index
clusterInd2CentFileName = self.extractFrames(matrixIndex2Cluster, outputPrefix, reps=True)
else:
clusterInd2CentFileName = {}
return clusterInd2CentFileName
def outputAllFrames(self, list_of_clusters, outputPrefix):
## check to make sure we'll be able to map all matrix indices to files
for clusterInd, cluster in enumerate(list_of_clusters):
#print cluster
#print indexToFrame.keys()
for matrixInd in cluster:
if not(matrixInd in self.indexToFrame.keys()):
raise Exception('User requested all frame pdbs to be output to cluster directories, but the program is unable to map all overlap matrix indices to trajectory/frame combinations. Make sure that -t/-T and -i arguments cover all frames and prefixes. Index: %i Cluster: %i' %(matrixInd, clusterInd))
## If all mappings exist, extract all relevant frames
matrixInd2Cluster = {}
for clusterInd, cluster in enumerate(list_of_clusters):
for matrixInd in cluster:
matrixInd2Cluster[matrixInd] = clusterInd
self.extractFrames(matrixInd2Cluster, outputPrefix, reps=False)
def extractFrames(self, matrixIndex2Cluster, outputPrefix, reps=False):
framesToExtract = {}
clusterInd2CentFileName = {}
for thisMatrixIndex in matrixIndex2Cluster:
thisCluster = matrixIndex2Cluster[thisMatrixIndex]
npyFileName = self.indexToNpyFile[thisMatrixIndex]
npyFilePrefix = npyFileName.split('/')[-1].split('frame_')[0]
frameNum = int(npyFileName.split('/')[-1].split('frame_')[-1].replace('.npy',''))
prefixMatch = ''
## See if this prefix is in our dictionary or trajectories
for trajPrefix in self.prefixToTrajectory.keys():
if trajPrefix == npyFilePrefix:
if prefixMatch == '':
prefixMatch = trajPrefix
else: # If a matching prefix has already been found
raise Exception('ERROR - file %s matches prefix %s and %s' %(npyFileName, trajPrefix, prefixMatch))
## Disabled this block - All prefix-to-trajectory matching should be explicit. This caused an error when POVME was run with a blank prefix
#if prefixMatch == '':
# trajFileName = npyFilePrefix + '.pdb'
#else:
trajFileName = self.prefixToTrajectory[prefixMatch]
## Figure out the directory and filename that this frame should be written to
outputDir = '%scluster%i' %(outputPrefix, thisCluster)
if not os.path.exists(outputDir):
os.system('mkdir %s' %(outputDir))
if reps == True:
outputFileName = 'REP_%sframe_%i.pdb' %(prefixMatch, frameNum)
clusterInd2CentFileName[thisCluster] = outputFileName
else:
outputFileName = '%sframe_%i.pdb' %(prefixMatch, frameNum)
fullOutputFileName = outputDir + '/' + outputFileName
if not trajFileName in framesToExtract.keys():
framesToExtract[trajFileName] = {}
framesToExtract[trajFileName][frameNum] = fullOutputFileName
for trajFileName in framesToExtract:
frameCounter = 1
frameData = ''
with open(trajFileName) as fo:
for line in fo:
if frameCounter in framesToExtract[trajFileName]:
frameData += line
if 'END' == line[:3]:
if frameData != '':
thisOutputFileName = framesToExtract[trajFileName][frameCounter]
with open(thisOutputFileName,'wb') as of:
of.write(frameData)
frameData = ''
frameCounter += 1
if frameData != '':
thisOutputFileName = framesToExtract[trajFileName][frameCounter]
with open(thisOutputFileName,'wb') as of:
of.write(frameData)
return clusterInd2CentFileName
'''
def extractFrame(self, matrixIndex, outputDir, rep=False):
npyFileName = self.indexToNpyFile[matrixIndex]
npyFilePrefix = npyFileName.split('/')[-1].split('frame_')[0]
frameNum = int(npyFileName.split('/')[-1].split('frame_')[-1].replace('.npy',''))
prefixMatch = ''
for trajPrefix in self.prefixToTrajectory.keys():
if trajPrefix == npyFilePrefix:
if prefixMatch == '':
prefixMatch = trajPrefix
else: # If a matching prefix has already been found
raise Exception('ERROR - file %s matches prefix %s and %s' %(npyFileName, trajPrefix, prefixMatch))
if prefixMatch == '':
trajFileName = npyFilePrefix + '.pdb'
else:
trajFileName = self.prefixToTrajectory[prefixMatch]
if rep == True:
outputFileName = '%s/REP_%sframe_%i.pdb' %(outputDir, prefixMatch, frameNum)
else:
outputFileName = '%s/%sframe_%i.pdb' %(outputDir, prefixMatch, frameNum)
frameCounter = 0
frameData = ''
with open(trajFileName) as fo:
for line in fo:
if frameCounter == frameNum:
frameData += line
if 'END' in line.strip():
frameCounter += 1
with open(outputFileName,'wb') as of:
of.write(frameData)
'''
def generate_difference_maps(self, list_of_clusters, clusterInd2CentFileName, outputPrefix):
print "Generating difference maps"
#nFrames = len(frame_assignments)
nFrames = sum([len(i) for i in list_of_clusters])
#list_of_clusters = self.separate_clusters(frame_assignments)
#nClusters = len(list_of_clusters)
allFrameCounts = {}
clusterCounts = []
for clusterIndex, cluster in enumerate(list_of_clusters):
nClusterFrames = len(cluster)
thisClusterCounts = {}
for matrixIndex in cluster:
npyFilename = self.indexToNpyFile[matrixIndex]
points = numpy.load(npyFilename)
if len(points) == 0:
continue
# If the list has intensity values
if points.shape[1]==4:
for point in points:
tuplePoint = tuple(point[:3])
allFrameCounts[tuplePoint] = allFrameCounts.get(tuplePoint,0) + (point[3]/nFrames)
thisClusterCounts[tuplePoint] = thisClusterCounts.get(tuplePoint,0) + (point[3]/nClusterFrames)
else:
for point in points:
tuplePoint = tuple(point)
allFrameCounts[tuplePoint] = allFrameCounts.get(tuplePoint,0) + (1.0/nFrames)
thisClusterCounts[tuplePoint] = thisClusterCounts.get(tuplePoint,0) + (1.0/nClusterFrames)
clusterCounts.append(thisClusterCounts)
allPoints = numpy.array(allFrameCounts.keys())
allFrameMap = peel.featureMap.fromPovmeList(allPoints, justCoords = True, skinDistance=2.)
allFrameMap.data[:] = 0.0
for point in allFrameCounts.keys():
thisIndex = allFrameMap.point_to_nearest_index(point)
allFrameMap.data[thisIndex] = allFrameCounts[point]
clusterMaps = []
for thisClusterCounts in clusterCounts:
thisClusterMap = peel.featureMap.fromPovmeList(allPoints, justCoords = True, skinDistance=2.)
thisClusterMap.data[:] = 0.0
for point in thisClusterCounts.keys():
thisIndex = allFrameMap.point_to_nearest_index(point)
thisClusterMap.data[thisIndex] = thisClusterCounts[point]
clusterMaps.append(thisClusterMap)
templateLoadDifference = '''mol new {%s} waitfor all
display projection Orthographic
mol modstyle 0 top Isosurface 0.7500000 0 0 1 1 1
#mol modstyle 0 !MOLID! Isosurface 0.2500000 0 1 1 1 1
#white
mol modcolor 0 top ColorID 8
mol addfile {%s} waitfor all
mol addrep top
#mol addrep !MOLID!
mol modstyle 1 top Isosurface 0.250000 1 0 1 1 1
#mol modstyle 1 !MOLID! Isosurface 0.250000 1 2 1 1 1
#blue
mol modcolor 1 top ColorID 0
mol showrep top 1 0
#mol showrep !MOLID! 1 0
mol addfile {%s} waitfor all
mol addrep top
#mol addrep !MOLID!
mol modmaterial 2 top Transparent
mol modstyle 2 top Isosurface 0.2500000 2 0 0 1 1
#mol modstyle 2 !MOLID! Isosurface 0.2500000 2 2 1 1 1
#green
mol modcolor 2 top ColorID 12
mol addfile {%s} waitfor all
mol addrep top
#mol addrep !MOLID!
mol modmaterial 3 top Transparent
mol modstyle 3 top Isosurface -0.7500000 3 0 0 1 1
#mol modstyle 3 !MOLID! Isosurface -0.2500000 3 2 1 1 1
#red
mol modcolor 3 top ColorID 1
# Now load the protein
mol addfile {%s} type {pdb} first 0 last -1 step 1 waitfor all
mol modstyle 4 top NewCartoon 0.300000 10.000000 4.100000 0
'''
plotAll = ''
allFrameDxName = '%saveragePocket.dx' %(outputPrefix)
allFrameMap.write_dx_file(allFrameDxName)
for clusterIndex, clusterMap in enumerate(clusterMaps):
outputDir = '%scluster%i' %(outputPrefix, clusterIndex)
if not os.path.exists(outputDir):
os.system('mkdir %s' %(outputDir))
thisClusterDxName = '%saverage.dx' %(outputPrefix)
clusterMap.write_dx_file(outputDir+'/'+thisClusterDxName)
differenceMap = copy.deepcopy(clusterMap)
differenceMap.data = differenceMap.data - allFrameMap.data
thisDifferenceDxName = '%sdifference.dx' %(outputPrefix)
differenceMap.write_dx_file(outputDir+'/'+thisDifferenceDxName)
thisCentroidPdbName = clusterInd2CentFileName[clusterIndex]
thisVmdScript = templateLoadDifference %('../'+allFrameDxName,
thisClusterDxName,
thisDifferenceDxName,
thisDifferenceDxName,
thisCentroidPdbName)
thisVmdScript = thisVmdScript.replace('!MOLID!', '0')
with open('%s/visualize.vmd' %(outputDir),'wb') as of:
of.write(thisVmdScript)
plotAllContrib = templateLoadDifference %(allFrameDxName,
outputDir+'/'+thisClusterDxName,
outputDir+'/'+thisDifferenceDxName,
outputDir+'/'+thisDifferenceDxName,
outputDir+'/'+thisCentroidPdbName)
plotAllContrib = plotAllContrib.replace('!MOLID!',str(clusterIndex))
plotAll += plotAllContrib
with open('%svisualizeAll.vmd' %(outputPrefix),'wb') as of:
of.write(plotAll)
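## A sketch of the resulting output layout, assuming outputPrefix is 'run1_' (a hypothetical prefix):
##   run1_averagePocket.dx                    - average pocket over all frames
##   run1_visualizeAll.vmd                    - VMD script loading every cluster's maps
##   run1_cluster0/run1_average.dx            - average pocket for cluster 0
##   run1_cluster0/run1_difference.dx         - cluster 0 average minus the all-frame average
##   run1_cluster0/REP_<prefix>frame_<N>.pdb  - cluster 0 representative (from find_centroids)
##   run1_cluster0/visualize.vmd              - per-cluster VMD script
## and likewise for run1_cluster1, run1_cluster2, ...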
## Write gobstopper view script
gobstopperViewHeader = '''
display projection Orthographic
color Display Background white
material add copy RTChrome
material change ambient Material23 0.00000
material change diffuse Material23 1.00000
material change specular Material23 0.00000
material change shininess Material23 0.00000
material change mirror Material23 0.00000
material change opacity Material23 0.00000
material change outline Material23 0.00000
material change outlinewidth Material23 0.00000
material change transmode Material23 1.00000
'''
templateGobstopperViewScript = '''
mol new {!!!CLUSTER AVERAGE FILENAME!!!} waitfor all
mol modstyle 0 top Isosurface 0.7500000 0 0 1 1 1
#mol modstyle 0 !MOLID! Isosurface 0.2500000 0 1 1 1 1
#white
mol modcolor 0 top ColorID 8
mol addrep top
mol modstyle 1 top Isosurface 0.250000 1 0 1 1 1
#blue
mol modcolor 1 top ColorID 0
mol showrep top 1 0
'''
# The clusters parameter is a list of lists - each inner list holds the matrix indices of one cluster
# Return value:
# spreads - one spread value per cluster
# (cnum - the number of clusters with more than one frame - is no longer returned;
# unexpected cluster counts are handled in the Kelley penalty code segment instead)
def find_spreads(self,clusters):
# print cluster
spreads = []
cnum = 0
newWay = True
#print 'AAAA'
if newWay == True:
for cluster in clusters:
thisSpread = numpy.sum(self.one_minus_overlap_values[cluster,:][:,cluster])/2
spreads.append(thisSpread)
else:
for current_set in clusters:
curr_spread = 0
# print current_set
# All combinations of frames in current cluster
#print current_set
combinations = itertools.combinations(current_set,2)
# NOTE: each item yielded by the iterator is a (frame1, frame2) pair; both values must be used
for frame1, frame2 in combinations:
curr_spread += self.one_minus_overlap_values[frame1][frame2]
# Calculate the N(N-1)/2 denominator for the spread of a cluster
# SET SPREAD TO 1 IF THERE ARE NO ELEMENTS OR ONLY A SINGLE ELEMENT
# IN THE CLUSTER
if len(current_set) <= 1:
spreads.append(0)
else:
cnum += 1
curr_spread /= (len(current_set)*(len(current_set)-1)/2)
spreads.append(curr_spread)
## Unexpected numbers of clusters are now handled in the Kelley penalty code segment
#return spread,cnum
return spreads
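## Note: the vectorized path above returns the raw pairwise sum of (1 - overlap) for each cluster
## (the symmetric submatrix is summed and halved so each unordered pair is counted once, assuming
## the diagonal of one_minus_overlap_values is zero because a frame overlaps itself completely),
## whereas the disabled pairwise path also divided by the N(N-1)/2 pair count to give an average.
## For a 3-frame cluster [i, j, k] the vectorized value is d(i,j) + d(i,k) + d(j,k), with d being
## one_minus_overlap_values.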
'''
# DO ALL CALCULATIONS IN ONE METHOD OR DO SEPARATE METHODS???
def average_spread(self,spread,cnum):
avg_value = 0
for i in spread:
avg_value += i
avg_value = avg_value / cnum
return avg_value
def norm_avg_spread(self, list_avg_spread):
max_avg_spread = list_avg_spread[0]
min_avg_spread = list_avg_spread[0]
for i in list_avg_spread:
if i > max_avg_spread:
max_avg_spread = i
elif i < min_avg_spread:
min_avg_spread = i
return
'''
#def print_help():
# print "To run cluster.py: cluster.py [optional -h -k] binding_overlap_file pdb_trajectory_file output_file_names"
class main():
def __init__(self,argv):
''' TEMP INPUT: cluster.py overlap_file original_traj_file '''
''' Pick between running kmeans or hierarchical clustering or both
Parse the command line inputs.
'''
parser = argparse.ArgumentParser(description="Cluster POVME pocket volumes.")
parser.add_argument('-m',
help='The pocket overlap matrix (generated by binding_site_overlap.py)')
parser.add_argument('-t', nargs='?', action='append',default=[],
help='A mapping between .npy file prefixes and their original pdb file/trajectory, separated by a colon. Can be used repeatedly. Ex: -t 1BYQ:./trajectories/1BYQ.pdb -t 1UYF:./trajectories/1UYF.pdb')
parser.add_argument('-T', nargs='?', action='append', default=[],
help='A file containing a series of -t arguments')
parser.add_argument('-i', nargs='?',
help='The index file mapping the pocket overlap matrix to frame numbers. Required to return cluster representatives.')
parser.add_argument('--kmeans', action='store_true',
help='Use kmeans clustering instead of hierarchical.')
parser.add_argument('-n', nargs='?',
help='Manually set number of clusters. Otherwise the Kelley penalty will calculate the optimal number.')
parser.add_argument('-N', nargs='?',
help='Set min, min:max, or min:max:skip values for number of clusters that the Kelley penalty can consider.')
parser.add_argument('-o', nargs='?', default='',
help='Specify an output prefix.')
parser.add_argument('-a', action='store_true',
help='Output all frames into cluster subdirectories (not just cluster reps).')
args = parser.parse_args(sys.argv[1:])
#''' Initial options '''
#command_input = {}
#command_input['kmeans'] = False
#command_input['hierarchical'] = False
#command_input['csv_file'] = ''
#command_input['output_name'] = ''
#command_input['num_clusters'] = None
#command_input['indexToFrames'] = ''
#'''Quick and dirty hack for options - need to find more efficient way '''
#for arg in argv:
# print arg
# if 'indexMapToFrames.csv' in arg:
# command_input['indexToFrames'] = arg
# elif '.csv' in arg:
# command_input['csv_file'] = arg
# elif arg == "-k":
# command_input['kmeans']= True
# elif arg == "-h":
# command_input['hierarchical'] = True
# elif arg.isdigit():
# command_input['num_clusters'] = int(arg)
# Print message and exit out of program if missing essential files
if args.m == '':
print "Cannot run cluster.py: Need an overlap file from binding_site_overlap.py in order to cluster \n"
#print_help()
sys.exit(1)
#if command_input['indexToFrames'] =='':
#print args.i
# if command_input['pdb_file'] == '':
# print "Cannot run cluster.py: Need the initial trajectory file in order to separate into clusters \n"
# print_help()
# sys.exit(1)
''' Currently only matches correctly if you run script from the folder where all the npy files are located '''
# for filename in os.listdir('.'):
# print filename
# if fnmatch.fnmatch(filename,input_string_file):
# command_input['traj_file'].append(filename)
# If both -k and -h weren't specified, then we want to allow both options
##if command_input['kmeans'] == command_input['hierarchical']:
## command_input['kmeans'] = True
## command_input['hierarchical'] = True
# command_input['output_name'] = command_input['pdb_file'].strip('.pdb')
# Read csv overlap file and parse results
csv_input = InputReader()
#csv_input.read_traj(command_input['indexToFrames'])
#csv_input.read_overlap(command_input['csv_file'])
csv_input.read_overlap(args.m)
#if args.i != None:
csv_input.read_indexFile(args.i)
csv_input.parse_traj_inputs(args.t, args.T)
#print csv_input.prefixToTrajectory
#1/0
#else:
#If the user didn't specify an index file, make a dictionary that just returns the input number
#nFrames = len(csv_input.overlapMatrix)
#for i in range(nFrames):
# csv_input.frameToFileName[i]=i
#coordinates = Cluster(csv_input.coordinates,csv_input.frames,csv_input.overlapMatrix,csv_input.frameToFileName)
clustering_obj = Cluster(csv_input)
#print args.n
if args.n != None:
if args.N != None:
raise Exception('Both -n and -N command line options specified.')
#If the user manually specified a number of clusters
if args.kmeans == True:
list_of_clusters = clustering_obj.kmeans_cluster(int(args.n))
else:
list_of_clusters = clustering_obj.hierarchical_cluster(int(args.n))
#clusters = clustering_obj.separate_clusters(frame_assignments)
# If the user didn't specify the number of clusters...
else:
# ...use the kelley penalty to find the optimal number
if args.N != None:
argsNsp = args.N.split(':')
if len(argsNsp) == 1:
maxKPClusters = int(argsNsp[0])
userNClusters = range(1,maxKPClusters+1)
print "Computing Kelley penalty for nClusters from 1 to %i" %(maxKPClusters)
elif args.N.count(':') == 1:
minKPClusters = int(argsNsp[0])
maxKPClusters = int(argsNsp[1])
userNClusters = range(minKPClusters,maxKPClusters+1)
print "Computing Kelley penalty for nClusters from %i to %i" %(minKPClusters, maxKPClusters)
elif args.N.count(':') == 2:
minKPClusters = int(argsNsp[0])
maxKPClusters = int(argsNsp[1])
stride = int(argsNsp[2])
userNClusters = range(minKPClusters,maxKPClusters+1,stride)
print "Computing Kelley penalty for nClusters from %i to %i, taking strides of %i" %(minKPClusters, maxKPClusters, stride)
else:
#maxKPClusters = clustering_obj.frames
maxKPClusters = min(75, clustering_obj.frames)
userNClusters = range(1,maxKPClusters+1)
print "Computing Kelley penalty for nClusters from 1 to %i" %(maxKPClusters)
## In order to achieve proper scaling, we must ALWAYS have 1 as a possible cluster number
## in the Kelley penalty computations
if not 1 in userNClusters:
potentialNClusters = [1] + userNClusters
else:
potentialNClusters = userNClusters
## We'll be doing numpy-style slicing later so convert it here
potentialNClusters = numpy.array(potentialNClusters)
clustering_results = {}
#avSpreads = numpy.zeros(maxKPClusters+1)
avSpreads = []
# Invalidate "0 clusters" option
#avSpreads[0] = -1
lastProgressDecile = 0
for index, nClusters in enumerate(potentialNClusters):
progressDecile = (10*index)/len(potentialNClusters)
if progressDecile > lastProgressDecile:
lastProgressDecile = progressDecile
print "Kelley penalty " + str(progressDecile*10) + "% computed"
if args.kmeans == True:
list_of_clusters = clustering_obj.kmeans_cluster(nClusters)
else:
list_of_clusters = clustering_obj.hierarchical_cluster(nClusters)
clustering_results[nClusters] = list_of_clusters
#clusters = clustering_obj.separate_clusters(frame_assignments)
if len(list_of_clusters) != nClusters:
# If we didn't get as many clusters as we expected, put a placeholder in the array
#avSpreads[nClusters] = -1
avSpreads.append(-1)
# and then skip to the next iteration
continue
spreads = clustering_obj.find_spreads(list_of_clusters)
nSingletonClusters = sum([len(i) == 1 for i in list_of_clusters])
avSpread = float(sum(spreads)) / (nClusters - nSingletonClusters)
#avSpreads[nClusters] = avSpread
avSpreads.append(avSpread)
avSpreads = numpy.array(avSpreads)
## Remove places where the spread is -1
# Make a boolean index array validSpreads (e.g. [0 1 1 0 1])
validSpreads = avSpreads != -1
# find the indices of valid spreads (e.g. [1 2 4] for the example above)
validIndices = numpy.nonzero(validSpreads)[0]
## Remove invalid nClusters and avSpread values
validNClusters = potentialNClusters[validIndices]
avSpreads = avSpreads[validSpreads]
## Now normalize spreads to the range (1, N-1)
# subtract to bring the minimum value to 0
avSpreads -= numpy.min(avSpreads)
# multiply to bring the max value to N-2
avSpreads *= (clustering_obj.frames-2)/numpy.max(avSpreads)
# Then add 1 to everything to shift the range to (1, N-1)
avSpreads += 1
## and finally compute the penalty value
kPenalties = avSpreads + (1.0*validNClusters)
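## A toy illustration of the scaling above (not real data): with N = 10 frames and raw average
## spreads [0.0, 0.2, 0.5] for 1, 2 and 3 clusters, subtracting the minimum leaves [0.0, 0.2, 0.5],
## scaling by (N-2)/max = 8/0.5 = 16 gives [0.0, 3.2, 8.0], and adding 1 gives [1.0, 4.2, 9.0];
## adding the cluster counts then yields penalties [2.0, 6.2, 12.0], and the smallest penalty wins.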
#pylab.scatter(validNClusters, kPenalties)
if 1 in userNClusters:
pylab.plot(validNClusters, kPenalties, '-o')
else:
pylab.plot(validNClusters[1:], kPenalties[1:], '-o')
pylab.show()
optimal_nClusters = validNClusters[numpy.argsort(kPenalties)[0]]
list_of_clusters = clustering_results[optimal_nClusters]
#clusters = clustering_obj.separate_clusters(frame_assignments)
print "Done computing Kelley penalty. Optimal number of clusters is %i" %(optimal_nClusters)
clusterInd2CentFileName = clustering_obj.find_centroids(list_of_clusters, args.o)
## If desired, output all frames in this cluster
if args.a == True:
clustering_obj.outputAllFrames(list_of_clusters, args.o)
## Generate cluster characteristics
if (clustering_obj.indexToFrame != {}) and (clustering_obj.prefixToTrajectory != {}):
clustering_obj.generate_difference_maps(list_of_clusters, clusterInd2CentFileName, args.o)
if __name__ == "__main__": main(sys.argv)
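## Example invocation (a sketch; the matrix and index file names are hypothetical, and the -t
## syntax follows the help text above):
##   python cluster.py -m binding_site_overlap_matrix.csv -i indexMapToFrames.csv \
##       -t 1BYQ:./trajectories/1BYQ.pdb -t 1UYF:./trajectories/1UYF.pdb -N 2:30 -o run1_ -a
## This clusters the overlap matrix hierarchically (the default when --kmeans is not given), lets
## the Kelley penalty pick the cluster count between 2 and 30, writes cluster representatives, and
## (because of -a) also writes every frame of each cluster into run1_cluster* subdirectories.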
|
|
"""
Test script for dataclasses
"""
from __future__ import absolute_import, division, print_function
from tests.util import unittest_reporter, glob_tests
import logging
logger = logging.getLogger('dataclasses')
import os
import sys
import json
try:
import cPickle as pickle
except:
import pickle
import unittest
from iceprod.core import to_log
import iceprod.core.dataclasses
class dataclasses_test(unittest.TestCase):
def setUp(self):
super(dataclasses_test,self).setUp()
def tearDown(self):
super(dataclasses_test,self).tearDown()
@unittest_reporter
def test_01_Job(self):
"""Test the Job class"""
j = iceprod.core.dataclasses.Job()
if not j.valid():
raise Exception('empty job not valid')
j.convert()
if not j.valid():
raise Exception('converted empty job not valid')
@unittest_reporter
def test_02_Steering(self):
"""Test the Steering class"""
s = iceprod.core.dataclasses.Steering()
if not s.valid():
raise Exception('empty steering not valid')
s.convert()
if not s.valid():
raise Exception('converted empty steering not valid')
@unittest_reporter
def test_03_Task(self):
"""Test the Task class"""
t = iceprod.core.dataclasses.Task()
if not t.valid():
raise Exception('empty task not valid')
t.convert()
if not t.valid():
raise Exception('converted empty task not valid')
@unittest_reporter
def test_04_Tray(self):
"""Test the Tray class"""
t = iceprod.core.dataclasses.Tray()
if not t.valid():
raise Exception('empty tray not valid')
t.convert()
if not t.valid():
raise Exception('converted empty tray not valid')
@unittest_reporter
def test_05_Module(self):
"""Test the Module class"""
m = iceprod.core.dataclasses.Module()
if not m.valid():
raise Exception('empty module not valid')
m.convert()
if not m.valid():
raise Exception('converted empty module not valid')
@unittest_reporter
def test_06_Class(self):
"""Test the Class class"""
c = iceprod.core.dataclasses.Class()
if not c.valid():
raise Exception('empty class not valid')
c.convert()
if not c.valid():
raise Exception('converted empty class not valid')
@unittest_reporter
def test_08_Resource(self):
"""Test the Resource class"""
r = iceprod.core.dataclasses.Resource()
if not r.valid():
raise Exception('empty resource not valid')
r.convert()
if not r.valid():
raise Exception('converted empty resource not valid')
r['transfer'] = False
self.assertIs(r.do_transfer(), False)
r['transfer'] = 'f'
self.assertIs(r.do_transfer(), False)
r['transfer'] = 'N'
self.assertIs(r.do_transfer(), False)
r['transfer'] = 0
self.assertIs(r.do_transfer(), False)
r['transfer'] = True
self.assertIs(r.do_transfer(), True)
r['transfer'] = 'T'
self.assertIs(r.do_transfer(), True)
r['transfer'] = 'Yes'
self.assertIs(r.do_transfer(), True)
r['transfer'] = 1
self.assertIs(r.do_transfer(), True)
r['transfer'] = 'maybe'
self.assertEqual(r.do_transfer(), 'maybe')
r['transfer'] = 'If'
self.assertEqual(r.do_transfer(), 'maybe')
r['transfer'] = 'if needed'
self.assertEqual(r.do_transfer(), 'maybe')
r['transfer'] = 'exists'
self.assertEqual(r.do_transfer(), 'maybe')
r['transfer'] = 'blah'
self.assertIs(r.do_transfer(), True)
r['transfer'] = 1234
self.assertIs(r.do_transfer(), True)
r['transfer'] = [1,2,3]
self.assertIs(r.do_transfer(), True)
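# Taken together, the assertions above pin down the truth table expected of Resource.do_transfer()
# (inferred from these tests, not from the implementation): False-like values (False, 'f', 'N', 0)
# map to False; True-like values (True, 'T', 'Yes', 1) map to True; the 'maybe' spellings
# ('maybe', 'If', 'if needed', 'exists') map to 'maybe'; and anything else ('blah', 1234, [1,2,3])
# falls back to True.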
@unittest_reporter
def test_09_Data(self):
"""Test the Data class"""
d = iceprod.core.dataclasses.Data()
if not d.valid():
raise Exception('empty data not valid')
d.convert()
if not d.valid():
raise Exception('converted empty data not valid')
@unittest_reporter
def test_10_Batchsys(self):
"""Test the Batchsys class"""
b = iceprod.core.dataclasses.Batchsys()
if not b.valid():
raise Exception('empty batchsys not valid')
b.convert()
if not b.valid():
raise Exception('converted empty batchsys not valid')
@unittest_reporter
def test_20_DifPlus(self):
"""Test the DifPlus class"""
d = iceprod.core.dataclasses.DifPlus()
if not d.valid():
raise Exception('empty difplus not valid')
d.convert()
if not d.valid():
raise Exception('converted empty difplus not valid')
@unittest_reporter
def test_21_Dif(self):
"""Test the Dif class"""
d = iceprod.core.dataclasses.Dif()
if not d.valid():
raise Exception('empty dif not valid')
d.convert()
if not d.valid():
raise Exception('converted empty dif not valid')
@unittest_reporter
def test_22_Plus(self):
"""Test the Plus class"""
p = iceprod.core.dataclasses.Plus()
if not p.valid():
raise Exception('empty plus not valid')
p.convert()
if not p.valid():
raise Exception('converted empty plus not valid')
@unittest_reporter
def test_23_Personnel(self):
"""Test the Personnel class"""
p = iceprod.core.dataclasses.Personnel()
if not p.valid():
raise Exception('empty personnel not valid')
p.convert()
if not p.valid():
raise Exception('converted empty personnel not valid')
@unittest_reporter
def test_24_Datacenter(self):
"""Test the DataCenter class"""
d = iceprod.core.dataclasses.DataCenter()
if not d.valid():
raise Exception('empty datacenter not valid')
d.convert()
if not d.valid():
raise Exception('converted empty datacenter not valid')
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
alltests = glob_tests(loader.getTestCaseNames(dataclasses_test))
suite.addTests(loader.loadTestsFromNames(alltests,dataclasses_test))
return suite
|
|
# Copyright 2013, Big Switch Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon_lib import exceptions
from horizon_lib import forms
from horizon_lib import messages
from horizon_lib.utils import validators
from openstack_horizon import api
port_validator = validators.validate_port_or_colon_separated_port_range
LOG = logging.getLogger(__name__)
class UpdateRule(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(
required=False,
max_length=80, label=_("Description"))
protocol = forms.ChoiceField(
label=_("Protocol"), required=False,
choices=[('TCP', _('TCP')), ('UDP', _('UDP')), ('ICMP', _('ICMP')),
('ANY', _('ANY'))],
help_text=_('Protocol for the firewall rule'))
action = forms.ChoiceField(
label=_("Action"), required=False,
choices=[('ALLOW', _('ALLOW')), ('DENY', _('DENY'))],
help_text=_('Action for the firewall rule'))
source_ip_address = forms.IPField(
label=_("Source IP Address/Subnet"),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Source IP address or subnet'))
destination_ip_address = forms.IPField(
label=_('Destination IP Address/Subnet'),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Destination IP address or subnet'))
source_port = forms.CharField(
max_length=80,
label=_("Source Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Source port (integer in [1, 65535] or range in a:b)'))
destination_port = forms.CharField(
max_length=80,
label=_("Destination Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Destination port (integer in [1, 65535] or range'
' in a:b)'))
shared = forms.BooleanField(label=_("Shared"), required=False)
enabled = forms.BooleanField(label=_("Enabled"), required=False)
failure_url = 'horizon:project:firewalls:index'
def handle(self, request, context):
rule_id = self.initial['rule_id']
name_or_id = context.get('name') or rule_id
if context['protocol'] == 'ANY':
context['protocol'] = None
for f in ['source_ip_address', 'destination_ip_address',
'source_port', 'destination_port']:
if not context[f]:
context[f] = None
try:
rule = api.fwaas.rule_update(request, rule_id, **context)
msg = _('Rule %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return rule
except Exception as e:
msg = (_('Failed to update rule %(name)s: %(reason)s') %
{'name': name_or_id, 'reason': e})
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdatePolicy(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(required=False,
max_length=80, label=_("Description"))
shared = forms.BooleanField(label=_("Shared"), required=False)
audited = forms.BooleanField(label=_("Audited"), required=False)
failure_url = 'horizon:project:firewalls:index'
def handle(self, request, context):
policy_id = self.initial['policy_id']
name_or_id = context.get('name') or policy_id
try:
policy = api.fwaas.policy_update(request, policy_id, **context)
msg = _('Policy %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to update policy %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdateFirewall(forms.SelfHandlingForm):
name = forms.CharField(max_length=80,
label=_("Name"),
required=False)
description = forms.CharField(max_length=80,
label=_("Description"),
required=False)
firewall_policy_id = forms.ChoiceField(label=_("Policy"))
# TODO(amotoki): make UP/DOWN translatable
admin_state_up = forms.ChoiceField(choices=[(True, 'UP'), (False, 'DOWN')],
label=_("Admin State"))
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(UpdateFirewall, self).__init__(request, *args, **kwargs)
try:
tenant_id = self.request.user.tenant_id
policies = api.fwaas.policy_list(request, tenant_id=tenant_id)
policies = sorted(policies, key=lambda policy: policy.name)
except Exception:
exceptions.handle(request,
_('Unable to retrieve policy list.'))
policies = []
policy_id = kwargs['initial']['firewall_policy_id']
policy_name = [p.name for p in policies if p.id == policy_id][0]
firewall_policy_id_choices = [(policy_id, policy_name)]
for p in policies:
if p.id != policy_id:
p.set_id_as_name_if_empty()
firewall_policy_id_choices.append((p.id, p.name))
self.fields['firewall_policy_id'].choices = firewall_policy_id_choices
def handle(self, request, context):
firewall_id = self.initial['firewall_id']
name_or_id = context.get('name') or firewall_id
context['admin_state_up'] = (context['admin_state_up'] == 'True')
try:
firewall = api.fwaas.firewall_update(request, firewall_id,
**context)
msg = _('Firewall %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return firewall
except Exception as e:
msg = _('Failed to update firewall %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class InsertRuleToPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Insert Rule"))
insert_before = forms.ChoiceField(label=_("Before"),
required=False)
insert_after = forms.ChoiceField(label=_("After"),
required=False)
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(InsertRuleToPolicy, self).__init__(request, *args, **kwargs)
tenant_id = self.request.user.tenant_id
try:
all_rules = api.fwaas.rule_list(request, tenant_id=tenant_id)
for r in all_rules:
r.set_id_as_name_if_empty()
all_rules = sorted(all_rules, key=lambda rule: rule.name)
available_rules = [r for r in all_rules
if not r.firewall_policy_id]
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
available_choices = [(r.id, r.name) for r in available_rules]
current_choices = [(r.id, r.name) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve available rules: %s') % e
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = available_choices
self.fields['insert_before'].choices = [('', '')] + current_choices
self.fields['insert_after'].choices = [('', '')] + current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
insert_rule_id = context['firewall_rule_id']
insert_rule = api.fwaas.rule_get(request, insert_rule_id)
body = {'firewall_rule_id': insert_rule_id,
'insert_before': context['insert_before'],
'insert_after': context['insert_after']}
policy = api.fwaas.policy_insert_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully inserted to policy '
'%(policy)s.') % {
'rule': insert_rule.name or insert_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to insert rule to policy %(name)s: %(reason)s') % {
'name': policy_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class RemoveRuleFromPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Remove Rule"))
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(RemoveRuleFromPolicy, self).__init__(request, *args, **kwargs)
tenant_id = request.user.tenant_id
try:
all_rules = api.fwaas.rule_list(request, tenant_id=tenant_id)
for r in all_rules:
r.set_id_as_name_if_empty()
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
current_choices = [(r.id, r.name) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve current rules in policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'], 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
remove_rule_id = context['firewall_rule_id']
remove_rule = api.fwaas.rule_get(request, remove_rule_id)
body = {'firewall_rule_id': remove_rule_id}
policy = api.fwaas.policy_remove_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully removed from policy '
'%(policy)s.') % {
'rule': remove_rule.name or remove_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to remove rule from policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'],
'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
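# A sketch of the keyword arguments UpdateRule.handle() ends up forwarding to
# api.fwaas.rule_update() after normalization (the values below are hypothetical):
#   name='web-in', description='', protocol=None ('ANY' is mapped to None), action='ALLOW',
#   source_ip_address=None, destination_ip_address='10.0.0.0/24',
#   source_port=None, destination_port='80:443', shared=False, enabled=True
# Blank address/port fields are likewise cleared to None before the API call is made.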
|
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from typing import List, Optional, Tuple
from shared.insn_yaml import InsnsFile
from shared.mem_layout import get_memory_layout
from .program import ProgInsn, Program
class Snippet:
'''A collection of instructions, generated as part of a random program.'''
def insert_into_program(self, program: Program) -> None:
'''Insert this snippet into the given program
This assumes the parts of the snippet are disjoint from the existing
instructions in the program.
'''
raise NotImplementedError()
def to_json(self) -> object:
'''Serialize to an object that can be written as JSON'''
raise NotImplementedError()
@staticmethod
def _addr_from_json(where: str, json: object) -> int:
'''Read an instruction address from a parsed json object'''
if not isinstance(json, int):
raise ValueError('First coordinate of {} is not an integer.'
.format(where))
if json < 0:
raise ValueError('Address of {} is {}, but should be non-negative.'
.format(where, json))
if json & 3:
raise ValueError('Address of {} is {}, '
'but should be 4-byte aligned.'
.format(where, json))
return json
@staticmethod
def _nonneg_from_hjson(what: str, json: object) -> int:
'''Read a non-negative value from a parsed json object'''
if not isinstance(json, int):
raise ValueError('{} is not an integer.'
.format(what))
if json < 0:
raise ValueError('{} is {}, but should be non-negative.'
.format(what, json))
return json
@staticmethod
def _from_json_lst(insns_file: InsnsFile,
idx: List[int],
json: List[object]) -> 'Snippet':
raise NotImplementedError()
@staticmethod
def from_json(insns_file: InsnsFile,
idx: List[int],
json: object) -> 'Snippet':
'''The inverse of to_json'''
if not (isinstance(json, list) and json):
raise ValueError('Snippet {} is not a nonempty list.'.format(idx))
key = json[0]
if not isinstance(key, str):
raise ValueError('Key for snippet {} is not a string.'.format(idx))
if key == 'BS':
return BranchSnippet._from_json_lst(insns_file, idx, json[1:])
if key == 'LS':
return LoopSnippet._from_json_lst(insns_file, idx, json[1:])
elif key == 'PS':
return ProgSnippet._from_json_lst(insns_file, idx, json[1:])
elif key == 'SS':
return SeqSnippet._from_json_lst(insns_file, idx, json[1:])
else:
raise ValueError('Snippet {} has unknown key {!r}.'
.format(idx, key))
def _merge(self, snippet: 'Snippet') -> bool:
'''Merge snippet after this one and return True if possible.
If not possible, leaves self unchanged and returns False.
'''
return False
def merge(self, snippet: 'Snippet') -> 'Snippet':
'''Merge snippet after this one
On a successful merge, this will alter and return self. If a merge
isn't possible, this generates and returns a SeqSnippet.
'''
if self._merge(snippet):
return self
return SeqSnippet([self, snippet])
@staticmethod
def merge_list(snippets: List['Snippet']) -> 'Snippet':
'''Merge a non-empty list of snippets as much as possible'''
assert snippets
acc = []
cur = snippets[0]
for snippet in snippets[1:]:
if cur._merge(snippet):
continue
acc.append(cur)
cur = snippet
acc.append(cur)
return SeqSnippet(acc)
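# For example (with a, b and c standing for arbitrary ProgInsn objects): merging
# [ProgSnippet(0, [a]), ProgSnippet(4, [b]), ProgSnippet(12, [c])] yields
# SeqSnippet([ProgSnippet(0, [a, b]), ProgSnippet(12, [c])]) - the first two are contiguous
# (4 == 0 + 4 * 1) so they collapse into one ProgSnippet, but the third starts at 12 rather
# than the expected 8, so it stays separate.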
@staticmethod
def cons_option(snippet0: Optional['Snippet'],
snippet1: 'Snippet') -> 'Snippet':
'''Cons together one or two snippets'''
return snippet1 if snippet0 is None else snippet0.merge(snippet1)
def to_program(self) -> Program:
'''Write a series of disjoint snippets to make a program'''
# Find the size of the memory that we can access. Both memories start
# at address 0: a strict Harvard architecture. (mems[x][0] is the LMA
# for memory x, not the VMA)
mems = get_memory_layout()
imem_lma, imem_size = mems['IMEM']
dmem_lma, dmem_size = mems['DMEM']
program = Program(imem_lma, imem_size, dmem_lma, dmem_size)
self.insert_into_program(program)
return program
class ProgSnippet(Snippet):
'''A sequence of instructions that are executed in order'''
def __init__(self, addr: int, insns: List[ProgInsn]):
assert addr >= 0
assert addr & 3 == 0
self.addr = addr
self.insns = insns
def insert_into_program(self, program: Program) -> None:
program.add_insns(self.addr, self.insns)
def to_json(self) -> object:
'''Serialize to an object that can be written as JSON'''
return ['PS', self.addr, [i.to_json() for i in self.insns]]
@staticmethod
def _from_json_lst(insns_file: InsnsFile,
idx: List[int],
json: List[object]) -> Snippet:
'''The inverse of to_json.'''
# Each element should be a pair: (addr, insns).
if len(json) != 2:
raise ValueError('Snippet {} has {} arguments; '
'expected 2 for a ProgSnippet.'
.format(idx, len(json)))
j_addr, j_insns = json
where = 'snippet {}'.format(idx)
addr = Snippet._addr_from_json(where, j_addr)
if not isinstance(j_insns, list):
raise ValueError('Second coordinate of {} is not a list.'
.format(where))
insns = []
for insn_idx, insn_json in enumerate(j_insns):
pi_where = ('In snippet {}, instruction {}'
.format(idx, insn_idx))
pi = ProgInsn.from_json(insns_file, pi_where, insn_json)
insns.append(pi)
return ProgSnippet(addr, insns)
def _merge(self, snippet: Snippet) -> bool:
if not isinstance(snippet, ProgSnippet):
return False
next_addr = self.addr + 4 * len(self.insns)
if snippet.addr != next_addr:
return False
self.insns += snippet.insns
return True
class SeqSnippet(Snippet):
'''A nonempty sequence of snippets that run one after another'''
def __init__(self, children: List[Snippet]):
assert children
self.children = children
def insert_into_program(self, program: Program) -> None:
for child in self.children:
child.insert_into_program(program)
def to_json(self) -> object:
ret = ['SS'] # type: List[object]
ret += [c.to_json() for c in self.children]
return ret
@staticmethod
def _from_json_lst(insns_file: InsnsFile,
idx: List[int],
json: List[object]) -> Snippet:
if len(json) == 0:
raise ValueError('List at {} for SeqSnippet is empty.'.format(idx))
children = []
for i, item in enumerate(json):
children.append(Snippet.from_json(insns_file, idx + [i], item))
return SeqSnippet(children)
class BranchSnippet(Snippet):
'''A snippet representing a branch
branch_insn is the first instruction that runs, at address addr, then
either snippet0 or snippet1 will run. The program will complete in either
case.
'''
def __init__(self,
addr: int,
branch_insn: ProgInsn,
snippet0: Optional[Snippet],
snippet1: Optional[Snippet]):
assert snippet0 is not None or snippet1 is not None
self.addr = addr
self.branch_insn = branch_insn
self.snippet0 = snippet0
self.snippet1 = snippet1
def insert_into_program(self, program: Program) -> None:
program.add_insns(self.addr, [self.branch_insn])
if self.snippet0 is not None:
self.snippet0.insert_into_program(program)
if self.snippet1 is not None:
self.snippet1.insert_into_program(program)
def to_json(self) -> object:
js0 = None if self.snippet0 is None else self.snippet0.to_json()
js1 = None if self.snippet1 is None else self.snippet1.to_json()
return ['BS',
self.addr,
self.branch_insn.to_json(),
js0,
js1]
@staticmethod
def _from_json_lst(insns_file: InsnsFile,
idx: List[int],
json: List[object]) -> Snippet:
if len(json) != 4:
raise ValueError('List for snippet {} is of the wrong '
'length for a BranchSnippet ({}, not 4)'
.format(idx, len(json)))
j_addr, j_branch_insn, j_snippet0, j_snippet1 = json
addr_where = 'address for snippet {}'.format(idx)
addr = Snippet._addr_from_json(addr_where, j_addr)
bi_where = 'branch instruction for snippet {}'.format(idx)
branch_insn = ProgInsn.from_json(insns_file, bi_where, j_branch_insn)
snippet0 = (None if j_snippet0 is None
else Snippet.from_json(insns_file, idx + [0], j_snippet0))
snippet1 = (None if j_snippet1 is None
else Snippet.from_json(insns_file, idx + [1], j_snippet1))
if snippet0 is None and snippet1 is None:
raise ValueError('Both sides of branch snippet {} are None.'
.format(idx))
return BranchSnippet(addr, branch_insn, snippet0, snippet1)
class LoopSnippet(Snippet):
'''A snippet representing a loop'''
# A pair (from, to), giving a loop warp to apply at some address
Warp = Tuple[int, int]
def __init__(self,
addr: int,
hd_insn: ProgInsn,
body: Snippet,
warp: Optional[Warp]):
self.addr = addr
self.hd_insn = hd_insn
self.body = body
self.warp = warp
def insert_into_program(self, program: Program) -> None:
program.add_insns(self.addr, [self.hd_insn])
if self.warp is not None:
warp_lo, warp_hi = self.warp
program.add_loop_warp(self.addr + 4, warp_lo, warp_hi)
self.body.insert_into_program(program)
def to_json(self) -> object:
return ['LS',
self.addr,
self.hd_insn.to_json(),
self.body.to_json(),
self.warp]
@staticmethod
def _from_json_lst(insns_file: InsnsFile,
idx: List[int],
json: List[object]) -> Snippet:
if len(json) != 4:
raise ValueError('List for snippet {} is of the wrong '
'length for a LoopSnippet ({}, not 4)'
.format(idx, len(json)))
j_addr, j_hd_insn, j_body, j_warp = json
addr_where = 'address for snippet {}'.format(idx)
addr = Snippet._addr_from_json(addr_where, j_addr)
hi_where = 'head instruction for snippet {}'.format(idx)
hd_insn = ProgInsn.from_json(insns_file, hi_where, j_hd_insn)
body = Snippet.from_json(insns_file, idx + [0], j_body)
if j_warp is None:
warp = None
else:
if not isinstance(j_warp, list) or len(j_warp) != 2:
raise ValueError(f'Loop warp for snippet {idx} is not a '
'length-2 list.')
j_warp_lo, j_warp_hi = j_warp
warp_what = f'Loop warp for snippet {idx}'
warp_lo = Snippet._nonneg_from_hjson(warp_what, j_warp_lo)
warp_hi = Snippet._nonneg_from_hjson(warp_what, j_warp_hi)
if warp_lo >= warp_hi:
raise ValueError(f'{warp_what} goes from {warp_lo} to '
f'{warp_hi} (the wrong way!)')
warp = (warp_lo, warp_hi)
return LoopSnippet(addr, hd_insn, body, warp)
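# The JSON produced by to_json() is a nested list whose first element selects the subclass in
# Snippet.from_json(); for instance (a sketch, with instruction encodings elided as ...):
#   ['SS',
#    ['PS', 0, [...]],
#    ['BS', 8, ..., ['PS', 16, [...]], None],
#    ['LS', 24, ..., ['PS', 28, [...]], [2, 100]]]
# Addresses must be non-negative and 4-byte aligned, and a loop warp [lo, hi] needs lo < hi.
#
# A minimal round-trip sketch (not called anywhere in this module); it assumes insns_file is a
# loaded InsnsFile and prog_insn a ProgInsn whose to_json()/from_json() round-trips.
def _example_roundtrip(insns_file: InsnsFile, prog_insn: ProgInsn) -> Snippet:
    '''Serialize a two-instruction ProgSnippet and parse it back again.'''
    snippet = ProgSnippet(addr=0, insns=[prog_insn, prog_insn])
    return Snippet.from_json(insns_file, [], snippet.to_json())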
|
|
#!/usr/bin/env python2
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import itertools
from optparse import OptionParser
import os
import random
import re
import sys
import subprocess
from collections import namedtuple
from sparktestsupport import SPARK_HOME, USER_HOME, ERROR_CODES
from sparktestsupport.shellutils import exit_from_command_with_retcode, run_cmd, rm_r, which
from sparktestsupport.toposort import toposort_flatten, toposort
import sparktestsupport.modules as modules
# -------------------------------------------------------------------------------------------------
# Functions for traversing module dependency graph
# -------------------------------------------------------------------------------------------------
def determine_modules_for_files(filenames):
"""
Given a list of filenames, return the set of modules that contain those files.
If a file is not associated with a more specific submodule, then this method will consider that
file to belong to the 'root' module.
>>> sorted(x.name for x in determine_modules_for_files(["python/pyspark/a.py", "sql/core/foo"]))
['pyspark-core', 'sql']
>>> [x.name for x in determine_modules_for_files(["file_not_matched_by_any_subproject"])]
['root']
"""
changed_modules = set()
for filename in filenames:
matched_at_least_one_module = False
for module in modules.all_modules:
if module.contains_file(filename):
changed_modules.add(module)
matched_at_least_one_module = True
if not matched_at_least_one_module:
changed_modules.add(modules.root)
return changed_modules
def identify_changed_files_from_git_commits(patch_sha, target_branch=None, target_ref=None):
"""
Given a git commit and target ref, use the set of files changed in the diff in order to
determine which modules' tests should be run.
>>> [x.name for x in determine_modules_for_files( \
identify_changed_files_from_git_commits("fc0a1475ef", target_ref="5da21f07"))]
['graphx']
>>> 'root' in [x.name for x in determine_modules_for_files( \
identify_changed_files_from_git_commits("50a0496a43", target_ref="6765ef9"))]
True
"""
if target_branch is None and target_ref is None:
raise AttributeError("must specify either target_branch or target_ref")
elif target_branch is not None and target_ref is not None:
raise AttributeError("must specify either target_branch or target_ref, not both")
if target_branch is not None:
diff_target = target_branch
run_cmd(['git', 'fetch', 'origin', str(target_branch+':'+target_branch)])
else:
diff_target = target_ref
raw_output = subprocess.check_output(['git', 'diff', '--name-only', patch_sha, diff_target],
universal_newlines=True)
# Remove any empty strings
return [f for f in raw_output.split('\n') if f]
def setup_test_environ(environ):
print("[info] Setup the following environment variables for tests: ")
for (k, v) in environ.items():
print("%s=%s" % (k, v))
os.environ[k] = v
def determine_modules_to_test(changed_modules):
"""
Given a set of modules that have changed, compute the transitive closure of those modules'
dependent modules in order to determine the set of modules that should be tested.
Returns a topologically-sorted list of modules (ties are broken by sorting on module names).
>>> [x.name for x in determine_modules_to_test([modules.root])]
['root']
>>> [x.name for x in determine_modules_to_test([modules.build])]
['root']
>>> [x.name for x in determine_modules_to_test([modules.graphx])]
['graphx', 'examples']
>>> x = [x.name for x in determine_modules_to_test([modules.sql])]
>>> x # doctest: +NORMALIZE_WHITESPACE
['sql', 'hive', 'mllib', 'sql-kafka-0-10', 'examples', 'hive-thriftserver',
'pyspark-sql', 'sparkr', 'pyspark-mllib', 'pyspark-ml']
"""
modules_to_test = set()
for module in changed_modules:
modules_to_test = modules_to_test.union(determine_modules_to_test(module.dependent_modules))
modules_to_test = modules_to_test.union(set(changed_modules))
# If we need to run all of the tests, then we should short-circuit and return 'root'
if modules.root in modules_to_test:
return [modules.root]
return toposort_flatten(
{m: set(m.dependencies).intersection(modules_to_test) for m in modules_to_test}, sort=True)
def determine_tags_to_exclude(changed_modules):
tags = []
for m in modules.all_modules:
if m not in changed_modules:
tags += m.test_tags
return tags
# -------------------------------------------------------------------------------------------------
# Functions for working with subprocesses and shell tools
# -------------------------------------------------------------------------------------------------
def determine_java_executable():
"""Will return the path of the java executable that will be used by Spark's
tests or `None`"""
# Any changes in the way that Spark's build detects java must be reflected
# here. Currently the build looks for $JAVA_HOME/bin/java then falls back to
# the `java` executable on the path
java_home = os.environ.get("JAVA_HOME")
# check if there is an executable at $JAVA_HOME/bin/java
java_exe = which(os.path.join(java_home, "bin", "java")) if java_home else None
# if the java_exe wasn't set, check for a `java` version on the $PATH
return java_exe if java_exe else which("java")
JavaVersion = namedtuple('JavaVersion', ['major', 'minor', 'patch'])
def determine_java_version(java_exe):
"""Given a valid java executable will return its version in named tuple format
with accessors '.major', '.minor' and '.patch'"""
raw_output = subprocess.check_output([java_exe, "-version"],
stderr=subprocess.STDOUT,
universal_newlines=True)
raw_output_lines = raw_output.split('\n')
# find raw version string, eg 'java version "1.8.0_25"'
raw_version_str = next(x for x in raw_output_lines if " version " in x)
match = re.search('(\d+)\.(\d+)\.(\d+)', raw_version_str)
major = int(match.group(1))
minor = int(match.group(2))
patch = int(match.group(3))
return JavaVersion(major, minor, patch)
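# For example, for a raw_version_str of 'java version "1.8.0_25"' (as in the comment above) the
# regex captures 1, 8 and 0, so this returns JavaVersion(major=1, minor=8, patch=0).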
# -------------------------------------------------------------------------------------------------
# Functions for running the other build and test scripts
# -------------------------------------------------------------------------------------------------
def set_title_and_block(title, err_block):
os.environ["CURRENT_BLOCK"] = str(ERROR_CODES[err_block])
line_str = '=' * 72
print('')
print(line_str)
print(title)
print(line_str)
def run_apache_rat_checks():
set_title_and_block("Running Apache RAT checks", "BLOCK_RAT")
run_cmd([os.path.join(SPARK_HOME, "dev", "check-license")])
def run_scala_style_checks():
set_title_and_block("Running Scala style checks", "BLOCK_SCALA_STYLE")
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-scala")])
def run_java_style_checks():
set_title_and_block("Running Java style checks", "BLOCK_JAVA_STYLE")
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-java")])
def run_python_style_checks():
set_title_and_block("Running Python style checks", "BLOCK_PYTHON_STYLE")
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-python")])
def run_sparkr_style_checks():
set_title_and_block("Running R style checks", "BLOCK_R_STYLE")
if which("R"):
# R style check should be executed after `install-dev.sh`.
# Since warnings about `no visible global function definition` appear
# without the installation. SEE ALSO: SPARK-9121.
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-r")])
else:
print("Ignoring SparkR style check as R was not found in PATH")
def build_spark_documentation():
set_title_and_block("Building Spark Documentation", "BLOCK_DOCUMENTATION")
os.environ["PRODUCTION"] = "1 jekyll build"
os.chdir(os.path.join(SPARK_HOME, "docs"))
jekyll_bin = which("jekyll")
if not jekyll_bin:
print("[error] Cannot find a version of `jekyll` on the system; please",
" install one and retry to build documentation.")
sys.exit(int(os.environ.get("CURRENT_BLOCK", 255)))
else:
run_cmd([jekyll_bin, "build"])
os.chdir(SPARK_HOME)
def get_zinc_port():
"""
Get a randomized port on which to start Zinc
"""
return random.randrange(3030, 4030)
def kill_zinc_on_port(zinc_port):
"""
Kill the Zinc process running on the given port, if one exists.
"""
cmd = ("/usr/sbin/lsof -P |grep %s | grep LISTEN "
"| awk '{ print $2; }' | xargs kill") % zinc_port
subprocess.check_call(cmd, shell=True)
def exec_maven(mvn_args=()):
"""Will call Maven in the current directory with the list of mvn_args passed
in; output is handled by run_cmd and nothing is returned"""
zinc_port = get_zinc_port()
os.environ["ZINC_PORT"] = "%s" % zinc_port
zinc_flag = "-DzincPort=%s" % zinc_port
flags = [os.path.join(SPARK_HOME, "build", "mvn"), "--force", zinc_flag]
run_cmd(flags + mvn_args)
kill_zinc_on_port(zinc_port)
def exec_sbt(sbt_args=()):
"""Will call SBT in the current directory with the list of mvn_args passed
in (the parameter is actually named sbt_args); nothing is returned"""
sbt_cmd = [os.path.join(SPARK_HOME, "build", "sbt")] + sbt_args
sbt_output_filter = re.compile("^.*[info].*Resolving" + "|" +
"^.*[warn].*Merging" + "|" +
"^.*[info].*Including")
# NOTE: echo "q" is needed because sbt on encountering a build file
# with failure (either resolution or compilation) prompts the user for
# input either q, r, etc to quit or retry. This echo is there to make it
# not block.
echo_proc = subprocess.Popen(["echo", "\"q\n\""], stdout=subprocess.PIPE)
sbt_proc = subprocess.Popen(sbt_cmd,
stdin=echo_proc.stdout,
stdout=subprocess.PIPE)
echo_proc.wait()
for line in iter(sbt_proc.stdout.readline, ''):
if not sbt_output_filter.match(line):
print(line, end='')
retcode = sbt_proc.wait()
if retcode != 0:
exit_from_command_with_retcode(sbt_cmd, retcode)
def get_hadoop_profiles(hadoop_version):
"""
For the given Hadoop version tag, return a list of Maven/SBT profile flags for
building and testing against that Hadoop version.
"""
sbt_maven_hadoop_profiles = {
"hadoop2.6": ["-Phadoop-2.6"],
"hadoop2.7": ["-Phadoop-2.7"],
}
if hadoop_version in sbt_maven_hadoop_profiles:
return sbt_maven_hadoop_profiles[hadoop_version]
else:
print("[error] Could not find", hadoop_version, "in the list. Valid options",
" are", sbt_maven_hadoop_profiles.keys())
sys.exit(int(os.environ.get("CURRENT_BLOCK", 255)))
def build_spark_maven(hadoop_version):
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
mvn_goals = ["clean", "package", "-DskipTests"]
profiles_and_goals = build_profiles + mvn_goals
print("[info] Building Spark (w/Hive 1.2.1) using Maven with these arguments: ",
" ".join(profiles_and_goals))
exec_maven(profiles_and_goals)
def build_spark_sbt(hadoop_version):
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
sbt_goals = ["test:package", # Build test jars as some tests depend on them
"streaming-kafka-0-8-assembly/assembly",
"streaming-flume-assembly/assembly",
"streaming-kinesis-asl-assembly/assembly"]
profiles_and_goals = build_profiles + sbt_goals
print("[info] Building Spark (w/Hive 1.2.1) using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
def build_spark_unidoc_sbt(hadoop_version):
set_title_and_block("Building Unidoc API Documentation", "BLOCK_DOCUMENTATION")
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
sbt_goals = ["unidoc"]
profiles_and_goals = build_profiles + sbt_goals
print("[info] Building Spark unidoc (w/Hive 1.2.1) using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
def build_spark_assembly_sbt(hadoop_version):
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
sbt_goals = ["assembly/package"]
profiles_and_goals = build_profiles + sbt_goals
print("[info] Building Spark assembly (w/Hive 1.2.1) using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
# Note that we skip Unidoc build only if Hadoop 2.6 is explicitly set in this SBT build.
# For an unknown reason related to a difference in dependency resolution between SBT & Unidoc,
# the documentation build fails on a specific machine & environment in Jenkins, and the failure
# could not be reproduced elsewhere. Please see SPARK-20343. This is a band-aid fix that should be removed in
# the future.
is_hadoop_version_2_6 = os.environ.get("AMPLAB_JENKINS_BUILD_PROFILE") == "hadoop2.6"
if not is_hadoop_version_2_6:
# Make sure that Java and Scala API documentation can be generated
build_spark_unidoc_sbt(hadoop_version)
def build_apache_spark(build_tool, hadoop_version):
"""Will build Spark against Hive v1.2.1 given the passed in build tool (either `sbt` or
`maven`). Defaults to using `sbt`."""
set_title_and_block("Building Spark", "BLOCK_BUILD")
rm_r("lib_managed")
if build_tool == "maven":
build_spark_maven(hadoop_version)
else:
build_spark_sbt(hadoop_version)
def detect_binary_inop_with_mima(hadoop_version):
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
set_title_and_block("Detecting binary incompatibilities with MiMa", "BLOCK_MIMA")
run_cmd([os.path.join(SPARK_HOME, "dev", "mima")] + build_profiles)
def run_scala_tests_maven(test_profiles):
mvn_test_goals = ["test", "--fail-at-end"]
profiles_and_goals = test_profiles + mvn_test_goals
print("[info] Running Spark tests using Maven with these arguments: ",
" ".join(profiles_and_goals))
exec_maven(profiles_and_goals)
def run_scala_tests_sbt(test_modules, test_profiles):
sbt_test_goals = list(itertools.chain.from_iterable(m.sbt_test_goals for m in test_modules))
if not sbt_test_goals:
return
profiles_and_goals = test_profiles + sbt_test_goals
print("[info] Running Spark tests using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
def run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags):
"""Function to properly execute all tests passed in as a set from the
`determine_test_suites` function"""
set_title_and_block("Running Spark unit tests", "BLOCK_SPARK_UNIT_TESTS")
test_modules = set(test_modules)
test_profiles = get_hadoop_profiles(hadoop_version) + \
list(set(itertools.chain.from_iterable(m.build_profile_flags for m in test_modules)))
if excluded_tags:
test_profiles += ['-Dtest.exclude.tags=' + ",".join(excluded_tags)]
if build_tool == "maven":
run_scala_tests_maven(test_profiles)
else:
run_scala_tests_sbt(test_modules, test_profiles)
def run_python_tests(test_modules, parallelism):
set_title_and_block("Running PySpark tests", "BLOCK_PYSPARK_UNIT_TESTS")
command = [os.path.join(SPARK_HOME, "python", "run-tests")]
if test_modules != [modules.root]:
command.append("--modules=%s" % ','.join(m.name for m in test_modules))
command.append("--parallelism=%i" % parallelism)
run_cmd(command)
def run_python_packaging_tests():
set_title_and_block("Running PySpark packaging tests", "BLOCK_PYSPARK_PIP_TESTS")
command = [os.path.join(SPARK_HOME, "dev", "run-pip-tests")]
run_cmd(command)
def run_build_tests():
set_title_and_block("Running build tests", "BLOCK_BUILD_TESTS")
run_cmd([os.path.join(SPARK_HOME, "dev", "test-dependencies.sh")])
pass
def run_sparkr_tests():
set_title_and_block("Running SparkR tests", "BLOCK_SPARKR_UNIT_TESTS")
if which("R"):
run_cmd([os.path.join(SPARK_HOME, "R", "run-tests.sh")])
else:
print("Ignoring SparkR tests as R was not found in PATH")
def parse_opts():
parser = OptionParser(
prog="run-tests"
)
parser.add_option(
"-p", "--parallelism", type="int", default=4,
help="The number of suites to test in parallel (default %default)"
)
(opts, args) = parser.parse_args()
if args:
parser.error("Unsupported arguments: %s" % ' '.join(args))
if opts.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return opts
def main():
opts = parse_opts()
# Ensure the user home directory (HOME) is valid and is an absolute directory
if not USER_HOME or not os.path.isabs(USER_HOME):
print("[error] Cannot determine your home directory as an absolute path;",
" ensure the $HOME environment variable is set properly.")
sys.exit(1)
os.chdir(SPARK_HOME)
rm_r(os.path.join(SPARK_HOME, "work"))
rm_r(os.path.join(USER_HOME, ".ivy2", "local", "org.apache.spark"))
rm_r(os.path.join(USER_HOME, ".ivy2", "cache", "org.apache.spark"))
os.environ["CURRENT_BLOCK"] = str(ERROR_CODES["BLOCK_GENERAL"])
java_exe = determine_java_executable()
if not java_exe:
print("[error] Cannot find a version of `java` on the system; please",
" install one and retry.")
sys.exit(2)
java_version = determine_java_version(java_exe)
# install SparkR
if which("R"):
run_cmd([os.path.join(SPARK_HOME, "R", "install-dev.sh")])
else:
print("Cannot install SparkR as R was not found in PATH")
if os.environ.get("AMPLAB_JENKINS"):
# if we're on the Amplab Jenkins build servers, set up variables
# to reflect the environment settings
build_tool = os.environ.get("AMPLAB_JENKINS_BUILD_TOOL", "sbt")
hadoop_version = os.environ.get("AMPLAB_JENKINS_BUILD_PROFILE", "hadoop2.6")
test_env = "amplab_jenkins"
# add path for Python3 in Jenkins if we're calling from a Jenkins machine
os.environ["PATH"] = "/home/anaconda/envs/py3k/bin:" + os.environ.get("PATH")
else:
# else we're running locally and can use local settings
build_tool = "sbt"
hadoop_version = os.environ.get("HADOOP_PROFILE", "hadoop2.6")
test_env = "local"
print("[info] Using build tool", build_tool, "with Hadoop profile", hadoop_version,
"under environment", test_env)
changed_modules = None
changed_files = None
if test_env == "amplab_jenkins" and os.environ.get("AMP_JENKINS_PRB"):
target_branch = os.environ["ghprbTargetBranch"]
changed_files = identify_changed_files_from_git_commits("HEAD", target_branch=target_branch)
changed_modules = determine_modules_for_files(changed_files)
excluded_tags = determine_tags_to_exclude(changed_modules)
if not changed_modules:
changed_modules = [modules.root]
excluded_tags = []
print("[info] Found the following changed modules:",
", ".join(x.name for x in changed_modules))
# setup environment variables
# note - the 'root' module doesn't collect environment variables for all modules,
# because environment variables should not be set for a module that hasn't changed,
# even when running the 'root' module. So we use changed_modules rather than test_modules here.
test_environ = {}
for m in changed_modules:
test_environ.update(m.environ)
setup_test_environ(test_environ)
test_modules = determine_modules_to_test(changed_modules)
# license checks
run_apache_rat_checks()
# style checks
if not changed_files or any(f.endswith(".scala")
or f.endswith("scalastyle-config.xml")
for f in changed_files):
run_scala_style_checks()
if not changed_files or any(f.endswith(".java")
or f.endswith("checkstyle.xml")
or f.endswith("checkstyle-suppressions.xml")
for f in changed_files):
# run_java_style_checks()
pass
if not changed_files or any(f.endswith(".py") for f in changed_files):
run_python_style_checks()
if not changed_files or any(f.endswith(".R") for f in changed_files):
run_sparkr_style_checks()
# determine if docs were changed and if we're inside the amplab environment
# note - the below commented out until *all* Jenkins workers can get `jekyll` installed
# if "DOCS" in changed_modules and test_env == "amplab_jenkins":
# build_spark_documentation()
if any(m.should_run_build_tests for m in test_modules):
run_build_tests()
# spark build
build_apache_spark(build_tool, hadoop_version)
# backwards compatibility checks
if build_tool == "sbt":
# Note: compatibility tests only supported in sbt for now
detect_binary_inop_with_mima(hadoop_version)
# Since we did not build assembly/package before running dev/mima, we need to
# do it here because the tests still rely on it; see SPARK-13294 for details.
build_spark_assembly_sbt(hadoop_version)
# run the test suites
run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags)
modules_with_python_tests = [m for m in test_modules if m.python_test_goals]
if modules_with_python_tests:
run_python_tests(modules_with_python_tests, opts.parallelism)
run_python_packaging_tests()
if any(m.should_run_r_tests for m in test_modules):
run_sparkr_tests()
def _test():
import doctest
failure_count = doctest.testmod()[0]
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
main()
|
|
from datetime import datetime, timedelta
from OpenSSL import crypto
from djutils.views.generic import SortMixin
from django.http import HttpResponse
from django.utils import timezone
from django.conf import settings
from django.contrib import messages
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy, reverse
from django.views.generic import FormView, DetailView, DeleteView, ListView
from django.views.generic.edit import FormMixin, ContextMixin
from core.utils import Ca
from core import forms
from core import models
class BreadcrumbsMixin(ContextMixin):
def get_breadcrumbs(self):
return None
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['breadcrumbs'] = self.get_breadcrumbs()
return context
class Search(BreadcrumbsMixin, SortMixin, FormMixin, ListView):
form_class = forms.CertificatesSearch
model = models.SiteCrt
template_name = 'core/certificate/search.html'
sort_params = ['cn', 'date_start', 'date_end']
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['data'] = self.request.GET
return kwargs
def get_queryset(self):
queryset = super().get_queryset()
form = self.form_class(self.request.GET)
if form.is_valid():
cn = form.cleaned_data['cn']
if cn:
queryset = queryset.filter(cn__icontains=cn)
return queryset
class Create(BreadcrumbsMixin, FormView):
form_class = forms.CertificatesCreate
template_name = 'core/certificate/create.html'
def get_success_url(self):
return reverse_lazy('certificates_view', kwargs={'pk': self.object.pk})
def get_breadcrumbs(self):
return (
('Home', reverse('index')),
('Create new certificate', '')
)
def get_initial(self):
return {'validity_period': timezone.now() + timedelta(days=settings.VALIDITY_PERIOD_CRT)}
def form_valid(self, form):
ca = Ca()
if ca.get_type_alt_names(form.cleaned_data['cn']):
self.object = ca.generate_site_crt(form.cleaned_data['cn'], form.cleaned_data['validity_period'], alt_name='IP')
else:
self.object = ca.generate_site_crt(form.cleaned_data['cn'], form.cleaned_data['validity_period'])
return super().form_valid(form)
class UploadExisting(BreadcrumbsMixin, FormView):
template_name = 'core/certificate/upload_existing.html'
form_class = forms.CertificatesUploadExisting
success_url = reverse_lazy('certificates_search')
def get_success_url(self):
return reverse_lazy('certificates_view', kwargs={'pk': self.object.pk})
def get_breadcrumbs(self):
return (
('Home', reverse('index')),
('Load an existing certificate', '')
)
def form_valid(self, form):
current_tz = timezone.get_current_timezone()
if form.cleaned_data['crt_file']:
crt_file_data = form.cleaned_data['crt_file'].read().decode()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, crt_file_data)
self.object = models.SiteCrt.objects.create(
key=form.cleaned_data['key_file'].read().decode(),
crt=crt_file_data,
cn=cert.get_subject().CN,
date_end=current_tz.localize(datetime.strptime(cert.get_notAfter().decode(), '%Y%m%d%H%M%SZ'))
)
elif form.cleaned_data['crt_text']:
cert = crypto.load_certificate(crypto.FILETYPE_PEM, form.cleaned_data['crt_text'])
cn = cert.get_subject().CN
self.object = models.SiteCrt.objects.create(
key=form.cleaned_data['key_text'],
crt=form.cleaned_data['crt_text'],
cn=cn,
date_end=current_tz.localize(datetime.strptime(cert.get_notAfter().decode(), '%Y%m%d%H%M%SZ'))
)
return super().form_valid(form)
class View(BreadcrumbsMixin, FormMixin, DetailView):
template_name = 'core/certificate/view.html'
form_class = forms.ViewCrtText
model = models.SiteCrt
def get_breadcrumbs(self):
return (
('Home', reverse('index')),
('View %s' % self.get_object().cn, '')
)
def get_initial(self):
crt = models.SiteCrt.objects.get(pk=self.kwargs['pk'])
crt_data = crt.crt
key_data = crt.key
return {'crt': crt_data, 'key': key_data}
def get_object(self, queryset=None):
return get_object_or_404(self.model, pk=self.kwargs['pk'])
def get_context_data(self, **kwargs):
cert_data = self.object.crt.encode()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data)
kwargs['cert'] = cert.get_subject()
kwargs['crt_validity_period'] = datetime.strptime(cert.get_notAfter().decode(), '%Y%m%d%H%M%SZ')
return super().get_context_data(**kwargs)
class Delete(BreadcrumbsMixin, DeleteView):
model = models.SiteCrt
template_name = 'core/certificate/delete.html'
success_url = reverse_lazy('certificates_search')
def get_breadcrumbs(self):
return (
('Home', reverse('index')),
('View %s' % self.get_object().cn, reverse('certificates_view', kwargs={'pk': self.kwargs['pk']})),
('Delete %s' % self.get_object().cn, '')
)
def get_object(self, queryset=None):
return get_object_or_404(self.model, pk=self.kwargs['pk'])
class Recreate(BreadcrumbsMixin, FormView, DetailView):
model = models.SiteCrt
form_class = forms.RecreationCrt
template_name = 'core/certificate/recreate.html'
def get_breadcrumbs(self):
return (
('Home', reverse('index')),
('View %s' % models.SiteCrt.objects.get(pk=self.kwargs['pk']).cn,
reverse('certificates_view', kwargs={'pk': self.kwargs['pk']})),
('Recreate certificate', '')
)
def get_success_url(self):
return reverse_lazy('certificates_view', kwargs={'pk': self.kwargs['pk']})
def get_initial(self):
return {'validity_period': timezone.now() + timedelta(days=settings.VALIDITY_PERIOD_CRT)}
def get_object(self, queryset=None):
return get_object_or_404(self.model, pk=self.kwargs['pk'])
def form_valid(self, form):
self.object = models.SiteCrt.objects.get(pk=self.kwargs['pk'])
ca = Ca()
ca.generate_site_crt(self.object.cn, form.cleaned_data['validity_period'], self.kwargs['pk'])
messages.success(self.request, 'Recreation success')
return super().form_valid(form)
class DownloadCrt(View):
def get(self, context, **response_kwargs):
pk = self.kwargs['pk']
obj = models.SiteCrt.objects.get(pk=pk)
res = HttpResponse(obj.crt, content_type='application/txt')
res['Content-Disposition'] = 'attachment; filename={}.crt'.format(obj.cn)
return res
class DownloadKey(View):
def get(self, context, **response_kwargs):
pk = self.kwargs['pk']
obj = models.SiteCrt.objects.get(pk=pk)
res = HttpResponse(obj.key, content_type='application/txt')
res['Content-Disposition'] = 'attachment; filename={}.key'.format(obj.cn)
return res
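# A minimal URLconf sketch (an assumption, not part of this module) showing how
# the URL names referenced above could be wired to these views; 'index' is
# expected to be defined elsewhere in the project, and the module path
# `core.views` is assumed.
#
# from django.urls import path
# from core import views
#
# urlpatterns = [
#     path('certificates/', views.Search.as_view(), name='certificates_search'),
#     path('certificates/create/', views.Create.as_view(), name='certificates_create'),
#     path('certificates/<int:pk>/', views.View.as_view(), name='certificates_view'),
#     path('certificates/<int:pk>/delete/', views.Delete.as_view(), name='certificates_delete'),
#     path('certificates/<int:pk>/crt/', views.DownloadCrt.as_view(), name='certificates_download_crt'),
#     path('certificates/<int:pk>/key/', views.DownloadKey.as_view(), name='certificates_download_key'),
# ]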
|
|
# -*- coding: utf-8 -*-
"""Test parsing of VCF header lines from strings
"""
import sys
import pytest
from vcfpy import header
from vcfpy import parser
__author__ = "Manuel Holtgrewe <[email protected]>"
# parser.StupidHeaderLineParser.parse_key_value() --------------------------
def test_stupid_vcf_header_line_parser_file_format():
p = parser.StupidHeaderLineParser()
INPUT = ("fileFormat", "VCFv4.2")
EXPECTED = "HeaderLine('fileFormat', 'VCFv4.2')"
assert str(p.parse_key_value(*INPUT)) == EXPECTED
# parser.MappingHeaderLineParser.parse_key_value() -------------------------
def test_mapping_vcf_header_line_parser_parse_key_value_filter():
p = parser.MappingHeaderLineParser(header.FilterHeaderLine)
INPUT = ("FILTER", '<ID=q10,Description="Quality below 10">')
if sys.version_info < (3, 6):
EXPECTED = (
"FilterHeaderLine('FILTER', '<ID=q10,Description=\"Quality below 10\">', "
"OrderedDict([('ID', 'q10'), ('Description', 'Quality below 10')]))"
)
else:
EXPECTED = (
"FilterHeaderLine('FILTER', '<ID=q10,Description=\"Quality below 10\">', "
"{'ID': 'q10', 'Description': 'Quality below 10'})"
)
assert str(p.parse_key_value(*INPUT)) == EXPECTED
def test_mapping_vcf_header_line_parser_parse_key_value_format():
p = parser.MappingHeaderLineParser(header.FormatHeaderLine)
INPUT = ("FORMAT", '<ID=GT,Number=1,Type=String,Description="Genotype">')
if sys.version_info < (3, 6):
EXPECTED = (
"FormatHeaderLine('FORMAT', '<ID=GT,Number=1,Type=String,Description=\"Genotype\">', "
"OrderedDict([('ID', 'GT'), ('Number', 1), ('Type', 'String'), ('Description', 'Genotype')]))"
)
else:
EXPECTED = (
"FormatHeaderLine('FORMAT', '<ID=GT,Number=1,Type=String,Description=\"Genotype\">', "
"{'ID': 'GT', 'Number': 1, 'Type': 'String', 'Description': 'Genotype'})"
)
assert str(p.parse_key_value(*INPUT)) == EXPECTED
def test_mapping_vcf_header_line_parser_parse_key_value_info():
p = parser.MappingHeaderLineParser(header.InfoHeaderLine)
INPUT = ("INFO", "<ID=NS,Number=1,Type=Integer,Description=" '"Number of Samples With Data">')
if sys.version_info < (3, 6):
EXPECTED = (
"InfoHeaderLine('INFO', '<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">', "
"OrderedDict([('ID', 'NS'), ('Number', 1), ('Type', 'Integer'), "
"('Description', 'Number of Samples With Data')]))"
)
else:
EXPECTED = (
"InfoHeaderLine('INFO', '<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">', "
"{'ID': 'NS', 'Number': 1, 'Type': 'Integer', 'Description': 'Number of Samples With Data'})"
)
assert str(p.parse_key_value(*INPUT)) == EXPECTED
def test_mapping_vcf_header_line_parser_parse_key_value_contig():
p = parser.MappingHeaderLineParser(header.ContigHeaderLine)
INPUT = (
"contig",
'<ID=20,length=62435964,assembly=B36,md5=f126cdf8a6e0c7f379d618ff66beb2da,species="Homo sapiens",taxonomy=x>',
)
if sys.version_info < (3, 6):
EXPECTED = (
"ContigHeaderLine('contig', '<ID=20,length=62435964,assembly=B36,md5=f126cdf8a6e0c7f379d618ff66beb2da,"
"species=\"Homo sapiens\",taxonomy=x>', OrderedDict([('ID', '20'), ('length', '62435964'), "
"('assembly', 'B36'), ('md5', 'f126cdf8a6e0c7f379d618ff66beb2da'), ('species', 'Homo sapiens'), "
"('taxonomy', 'x')]))"
)
else:
EXPECTED = (
"ContigHeaderLine('contig', '<ID=20,length=62435964,assembly=B36,md5=f126cdf8a6e0c7f379d618ff66beb2da,"
"species=\"Homo sapiens\",taxonomy=x>', {'ID': '20', 'length': '62435964', 'assembly': 'B36', "
"'md5': 'f126cdf8a6e0c7f379d618ff66beb2da', 'species': 'Homo sapiens', 'taxonomy': 'x'})"
)
assert str(p.parse_key_value(*INPUT)) == EXPECTED
# parser.HeaderParser.parse_line() -----------------------------------------
def test_vcf_header_parser_file_format():
p = parser.HeaderParser()
INPUT = "##fileFormat=VCFv4.2\n"
EXPECTED = "HeaderLine('fileFormat', 'VCFv4.2')"
assert str(p.parse_line(INPUT)) == EXPECTED
def test_vcf_header_parser_parse_line_filter():
p = parser.HeaderParser()
INPUT = '##FILTER=<ID=q10,Description="Quality below 10">\n'
if sys.version_info < (3, 6):
EXPECTED = (
"FilterHeaderLine('FILTER', '<ID=q10,Description=\"Quality below 10\">', OrderedDict([('ID', 'q10'), "
"('Description', 'Quality below 10')]))"
)
else:
EXPECTED = (
"FilterHeaderLine('FILTER', '<ID=q10,Description=\"Quality below 10\">', "
"{'ID': 'q10', 'Description': 'Quality below 10'})"
)
assert str(p.parse_line(INPUT)) == EXPECTED
def test_mapping_vcf_header_parser_parse_line_format():
p = parser.HeaderParser()
INPUT = '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
if sys.version_info < (3, 6):
EXPECTED = (
"FormatHeaderLine('FORMAT', '<ID=GT,Number=1,Type=String,Description=\"Genotype\">', "
"OrderedDict([('ID', 'GT'), ('Number', 1), ('Type', 'String'), ('Description', 'Genotype')]))"
)
else:
EXPECTED = (
"FormatHeaderLine('FORMAT', '<ID=GT,Number=1,Type=String,Description=\"Genotype\">', "
"{'ID': 'GT', 'Number': 1, 'Type': 'String', 'Description': 'Genotype'})"
)
assert str(p.parse_line(INPUT)) == EXPECTED
def test_mapping_vcf_header_parser_parse_line_info():
p = parser.HeaderParser()
INPUT = "##INFO=" "<ID=NS,Number=1,Type=Integer,Description=" '"Number of Samples With Data">\n'
if sys.version_info < (3, 6):
EXPECTED = (
"InfoHeaderLine('INFO', '<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">', "
"OrderedDict([('ID', 'NS'), ('Number', 1), ('Type', 'Integer'), ('Description', "
"'Number of Samples With Data')]))"
)
else:
EXPECTED = (
"InfoHeaderLine('INFO', '<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">', "
"{'ID': 'NS', 'Number': 1, 'Type': 'Integer', 'Description': 'Number of Samples With Data'})"
)
assert str(p.parse_line(INPUT)) == EXPECTED
def test_mapping_vcf_header_parser_parse_line_contig():
p = parser.HeaderParser()
INPUT = (
"##contig=<ID=20,length=62435964,assembly=B36,md5=f126cdf8a6e0c7f379d618ff66beb2da,"
'species="Homo sapiens",taxonomy=x>\n'
)
if sys.version_info < (3, 6):
EXPECTED = (
"ContigHeaderLine('contig', '<ID=20,length=62435964,assembly=B36,md5=f126cdf8a6e0c7f379d618ff66beb2da,"
"species=\"Homo sapiens\",taxonomy=x>', OrderedDict([('ID', '20'), ('length', '62435964'), "
"('assembly', 'B36'), ('md5', 'f126cdf8a6e0c7f379d618ff66beb2da'), ('species', 'Homo sapiens'), "
"('taxonomy', 'x')]))"
)
else:
EXPECTED = (
"ContigHeaderLine('contig', '<ID=20,length=62435964,assembly=B36,md5=f126cdf8a6e0c7f379d618ff66beb2da,"
"species=\"Homo sapiens\",taxonomy=x>', {'ID': '20', 'length': '62435964', 'assembly': 'B36', "
"'md5': 'f126cdf8a6e0c7f379d618ff66beb2da', 'species': 'Homo sapiens', 'taxonomy': 'x'})"
)
assert str(p.parse_line(INPUT)) == EXPECTED
def test_mapping_vcf_header_parser_parse_line_alt_allele():
p = parser.HeaderParser()
INPUT = "##ALT=" '<ID=R,Description="IUPAC code R = A/G">\n'
if sys.version_info < (3, 6):
EXPECTED = (
"AltAlleleHeaderLine('ALT', "
"'<ID=R,Description=\"IUPAC code R = A/G\">', "
"OrderedDict([('ID', 'R'), "
"('Description', 'IUPAC code R = A/G')]))"
)
else:
EXPECTED = (
"AltAlleleHeaderLine('ALT', "
"'<ID=R,Description=\"IUPAC code R = A/G\">', "
"{'ID': 'R', 'Description': 'IUPAC code R = A/G'})"
)
assert str(p.parse_line(INPUT)) == EXPECTED
def test_mapping_vcf_header_parser_parse_line_meta():
p = parser.HeaderParser()
INPUT = "##META=" "<ID=Assay,Type=String,Number=.,Values=[WholeGenome, Exome]>\n"
if sys.version_info < (3, 6):
EXPECTED = (
"MetaHeaderLine('META', '<ID=Assay,Type=String,Number=.,"
"Values=[WholeGenome, Exome]>', OrderedDict([('ID', 'Assay'), "
"('Type', 'String'), ('Number', '.'), ('Values', ['WholeGenome', "
"'Exome'])]))"
)
else:
EXPECTED = (
"MetaHeaderLine('META', '<ID=Assay,Type=String,Number=.,Values=[WholeGenome, Exome]>', "
"{'ID': 'Assay', 'Type': 'String', 'Number': '.', 'Values': ['WholeGenome', 'Exome']})"
)
assert str(p.parse_line(INPUT)) == EXPECTED
def test_mapping_vcf_header_parser_parse_line_pedigree():
p = parser.HeaderParser()
INPUT = "##PEDIGREE=" "<ID=TumourSample,Original=GermlineID>\n"
if sys.version_info < (3, 6):
EXPECTED = (
"PedigreeHeaderLine('PEDIGREE', "
"'<ID=TumourSample,Original=GermlineID>',"
" OrderedDict([('ID', 'TumourSample'), "
"('Original', 'GermlineID')]))"
)
else:
EXPECTED = (
"PedigreeHeaderLine('PEDIGREE', '<ID=TumourSample,Original=GermlineID>', "
"{'ID': 'TumourSample', 'Original': 'GermlineID'})"
)
assert str(p.parse_line(INPUT)) == EXPECTED
def test_mapping_vcf_header_parser_parse_line_sample():
p = parser.HeaderParser()
INPUT = (
"##SAMPLE="
"<ID=Sample1,Assay=WholeGenome,Ethnicity=AFR,Disease=None,"
'Description="Patient germline genome from unaffected",'
"DOI=url>\n"
)
if sys.version_info < (3, 6):
EXPECTED = (
"SampleHeaderLine('SAMPLE', '<ID=Sample1,Assay=WholeGenome,"
'Ethnicity=AFR,Disease=None,Description="Patient germline genome from '
"unaffected\",DOI=url>', OrderedDict([('ID', 'Sample1'), ('Assay', "
"'WholeGenome'), ('Ethnicity', 'AFR'), ('Disease', 'None'), "
"('Description', 'Patient germline genome from unaffected'), "
"('DOI', 'url')]))"
)
else:
EXPECTED = (
"SampleHeaderLine('SAMPLE', '<ID=Sample1,Assay=WholeGenome,"
'Ethnicity=AFR,Disease=None,Description="Patient germline genome from '
"unaffected\",DOI=url>', {'ID': 'Sample1', 'Assay': 'WholeGenome', 'Ethnicity': 'AFR', 'Disease': 'None', "
"'Description': 'Patient germline genome from unaffected', 'DOI': 'url'})"
)
assert str(p.parse_line(INPUT)) == EXPECTED
|
|
import json
import requests
from access_token import get_access_token
from vocabulary import ThreatExchange as t
from errors import (
pytxFetchError,
pytxValueError
)
class Broker(object):
"""
The Broker handles validation and submission of requests as well as
consumption and returning of the result. It is leveraged by the other
classes.
Since the Broker takes care of the entire request/response cycle, it can be
used on its own to interact with the ThreatExchange API without the need for
the other classes if a developer wishes to use it.
"""
@staticmethod
def get_new(klass, attrs):
"""
Return a new instance of klass.
:param klass: The class to create a new instance of.
:type klass: :class:
:param attrs: The attributes to set for this new instance.
:type attrs: dict
:returns: new instance of klass
"""
n = klass(**attrs)
n._new = False
n._changed = []
return n
@staticmethod
def is_timestamp(timestamp):
"""
Verifies the timestamp provided is a valid timestamp.
Valid timestamps follow PHP's "strtotime" semantics. Even with python's
"dateutil" library, some strings that "strtotime" accepts do not validate
properly. Until this check is accurate and robust enough to reach feature
parity with "strtotime", it will always return True and leave supplying
proper timestamps to the API
user.
:param timestamp: Value to verify is a timestamp.
:type timestamp: str
:returns: True
"""
return True
@staticmethod
def validate_limit(limit):
"""
Verifies the limit provided is valid and within the max limit Facebook
will allow you to use.
:param limit: Value to verify is a valid limit.
:type limit: int, str
:raises: :class:`pytxValueError` if invalid.
"""
try:
int(limit)
except ValueError as e:
raise pytxValueError(e)
return
@staticmethod
def sanitize_strict(strict_text):
"""
If strict_text is provided, sanitize it.
'true' will be used if strict_text is in [True, 'true', 'True', 1].
'false' will be used if strict_text is in [False, 'false', 'False', 0].
If we receive any other value strict_text will be set to None and
ignored when building the GET request.
:param strict_text: The value to sanitize.
:type strict_text: bool, str, int
:returns: str, None
"""
if strict_text in (True, 'true', 'True', 1):
strict = 'true'
elif strict_text in (False, 'false', 'False', 0):
strict = 'false'
else:
strict = None
return strict
@staticmethod
def handle_results(resp):
"""
Handle the results of a request.
:param resp: The HTTP response.
:type resp: response object
:returns: dict (using json.loads())
"""
if resp.status_code != 200:
raise pytxFetchError('Response code: %s: %s, URL: %s' % (
resp.status_code,
resp.text,
resp.url)
)
try:
results = json.loads(resp.text)
except:
raise pytxFetchError('Unable to convert response to JSON.')
return results
@classmethod
def validate_get(cls, limit, since, until):
"""
Executes validation for the GET parameters: limit, since, until.
:param limit: The limit to validate.
:type limit: int, str
:param since: The since timestamp to validate.
:type since: str
:param until: The until timestamp to validate.
:type until: str
"""
if since:
cls.is_timestamp(since)
if until:
cls.is_timestamp(until)
if limit:
cls.validate_limit(limit)
@classmethod
def build_get_parameters(cls, text=None, strict_text=None, type_=None, threat_type=None,
fields=None, limit=None, since=None, until=None):
"""
Validate arguments and convert them into GET parameters.
:param text: The text used for limiting the search.
:type text: str
:param strict_text: Whether we should use strict searching.
:type strict_text: bool, str, int
:param type_: The Indicator type to limit to.
:type type_: str
:param threat_type: The Threat type to limit to.
:type threat_type: str
:param fields: Select specific fields to pull
:type fields: str, list
:param limit: The maximum number of objects to return.
:type limit: int, str
:param since: The timestamp to limit the beginning of the search.
:type since: str
:param until: The timestamp to limit the end of the search.
:type until: str
:returns: dict
"""
cls.validate_get(limit, since, until)
strict = cls.sanitize_strict(strict_text)
params = {}
if text:
params[t.TEXT] = text
if strict is not None:
params[t.STRICT_TEXT] = strict
if type_:
params[t.TYPE] = type_
if threat_type:
params[t.THREAT_TYPE] = threat_type
if fields:
params[t.FIELDS] = ','.join(fields) if isinstance(fields, list) else fields
if limit:
params[t.LIMIT] = limit
if since:
params[t.SINCE] = since
if until:
params[t.UNTIL] = until
return params
@classmethod
def get(cls, url, params=None):
"""
Send a GET request.
:param url: The URL to send the GET request to.
:type url: str
:param params: The GET parameters to send in the request.
:type params: dict
:returns: dict (using json.loads())
"""
if not params:
params = dict()
params[t.ACCESS_TOKEN] = get_access_token()
resp = requests.get(url, params=params)
return cls.handle_results(resp)
@classmethod
def post(cls, url, params=None):
"""
Send a POST request.
:param url: The URL to send the POST request to.
:type url: str
:param params: The POST parameters to send in the request.
:type params: dict
:returns: dict (using json.loads())
"""
if not params:
params = dict()
params[t.ACCESS_TOKEN] = get_access_token()
resp = requests.post(url, params=params)
return cls.handle_results(resp)
@classmethod
def delete(cls, url, params=None):
"""
Send a DELETE request.
:param url: The URL to send the DELETE request to.
:type url: str
:param params: The DELETE parameters to send in the request.
:type params: dict
:returns: dict (using json.loads())
"""
if not params:
params = dict()
params[t.ACCESS_TOKEN] = get_access_token()
resp = requests.delete(url, params=params)
return cls.handle_results(resp)
@classmethod
def get_generator(cls, klass, url, total, to_dict=False, params=None):
"""
Generator for managing GET requests. For each GET request it will yield
the next object in the results until there are no more objects. If the
GET response contains a 'next' value in the 'paging' section, the
generator will automatically fetch the next set of results and continue
the process until the total limit has been reached or there is no longer
a 'next' value.
:param klass: The class to use for the generator.
:type klass: class
:param url: The URL to send the GET request to.
:type url: str
:param total: The total number of objects to return (-1 to disable).
:type total: None, int
:param to_dict: Return a dictionary instead of an instantiated class.
:type to_dict: bool
:param params: The GET parameters to send in the request.
:type params: dict
:returns: Generator
"""
if not klass:
raise pytxValueError('Must provide a valid object to query.')
if not params:
params = dict()
if total is None:
total = t.NO_TOTAL
if total == t.MIN_TOTAL:
yield None
next_ = True
while next_:
results = cls.get(url, params)
for data in results[t.DATA]:
if total == t.MIN_TOTAL:
return
if to_dict:
yield data
else:
yield cls.get_new(klass, data)
total -= t.DEC_TOTAL
try:
next_ = results[t.PAGING][t.NEXT]
except:
next_ = False
if next_:
url = next_
params = {}
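# Illustrative sketch of driving the Broker directly, as the class docstring
# suggests. The endpoint URL and the "DOMAIN" type value are assumptions for
# the example; the real endpoint and field constants live elsewhere in pytx.
def _example_broker_search():
    params = Broker.build_get_parameters(
        text='malicious-domain.example',  # free-text search term
        type_='DOMAIN',                   # assumed Indicator type name
        limit=25,
    )
    # get() appends the access token and returns the decoded JSON response.
    return Broker.get('https://graph.facebook.com/threat_descriptors',
                      params=params)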
|
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""Factory for aggregations parameterized by TensorFlow Privacy DPQueries."""
import collections
from typing import Collection, Optional, Tuple
import warnings
import tensorflow as tf
import tensorflow_privacy as tfp
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.aggregators import sum_factory
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.core.templates import measured_process
def adaptive_clip_noise_params(
noise_multiplier: float,
expected_clients_per_round: float,
clipped_count_stddev: Optional[float] = None) -> Tuple[float, float]:
"""Computes noising parameters for the adaptive L2 clipping procedure.
The adaptive clipping method (described in https://arxiv.org/abs/1905.03871)
runs a private quantile estimation procedure which may require the number of
clipped clients in a round to be also noised for privacy. Thus, to maintain
the same level of privacy as intended by the total noise multiplier, the
effective noise multiplier to be applied on the client records may need to be
(slightly) higher to account for the private quantile estimation.
Args:
noise_multiplier: The total noise multiplier for the mechanism.
expected_clients_per_round: A float specifying the expected number of
clients per round.
clipped_count_stddev: The stddev of the noise added to the clipped counts in
the adaptive clipping algorithm.
Returns:
A tuple with the `value_noise_multiplier` (to be applied to client records)
and the `clipped_count_stddev` (a default value if not specified).
"""
if noise_multiplier > 0.0:
if clipped_count_stddev is None:
clipped_count_stddev = 0.05 * expected_clients_per_round
if noise_multiplier >= 2 * clipped_count_stddev:
raise ValueError(
f'clipped_count_stddev = {clipped_count_stddev} (defaults to '
f'0.05 * `expected_clients_per_round` if not specified) is too low '
f'to achieve the desired effective `noise_multiplier` '
f'({noise_multiplier}). You must either increase '
f'`clipped_count_stddev` or decrease `noise_multiplier`.')
value_noise_multiplier = (noise_multiplier**-2 -
(2 * clipped_count_stddev)**-2)**-0.5
added_noise_factor = value_noise_multiplier / noise_multiplier
if added_noise_factor >= 2:
warnings.warn(
f'A significant amount of noise ({added_noise_factor:.2f}x) has to '
f'be added for record aggregation to achieve the desired effective '
f'`noise_multiplier` ({noise_multiplier}). If you are manually '
f'specifying `clipped_count_stddev` you may want to increase it. Or '
f'you may need more `expected_clients_per_round`.')
else:
if clipped_count_stddev is None:
clipped_count_stddev = 0.0
value_noise_multiplier = 0.0
return value_noise_multiplier, clipped_count_stddev
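# Worked example (illustrative numbers only): with noise_multiplier=1.0 and
# expected_clients_per_round=100, clipped_count_stddev defaults to
# 0.05 * 100 = 5.0, so
#
#   value_noise_multiplier = (1.0**-2 - (2 * 5.0)**-2)**-0.5
#                          = (1.0 - 0.01)**-0.5
#                          ~= 1.005
#
# i.e. client records are noised slightly more than the nominal multiplier to
# account for the noise spent on the clipped-count estimate.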
class DifferentiallyPrivateFactory(factory.UnweightedAggregationFactory):
"""`UnweightedAggregationFactory` for tensorflow_privacy DPQueries.
The created `tff.templates.AggregationProcess` aggregates values placed at
`CLIENTS` according to the provided DPQuery, and outputs the result placed at
`SERVER`.
A DPQuery defines preprocessing to perform on each value, and postprocessing
to perform on the aggregated, preprocessed values. Provided the preprocessed
values ("records") are aggregated in a way that is consistent with the
DPQuery, formal (epsilon, delta) privacy guarantees can be derived. This
aggregation is controlled by `record_aggregation_factory`.
A simple summation (using the default `tff.aggregators.SumFactory`) is usually
acceptable. Aggregations that change the records (such as compression or
secure aggregation) may be allowed so long as they do not increase the
sensitivity of the query. It is the users' responsibility to ensure that the
mode of aggregation is consistent with the DPQuery. Note that the DPQuery's
built-in aggregation functions (accumulate_preprocessed_record and
merge_sample_states) are ignored in favor of the provided aggregator.
"""
@classmethod
def gaussian_adaptive(
cls,
noise_multiplier: float,
clients_per_round: float,
initial_l2_norm_clip: float = 0.1,
target_unclipped_quantile: float = 0.5,
learning_rate: float = 0.2,
clipped_count_stddev: Optional[float] = None
) -> factory.UnweightedAggregationFactory:
"""`DifferentiallyPrivateFactory` with adaptive clipping and Gaussian noise.
Performs adaptive clipping and addition of Gaussian noise for differentially
private learning. For details of the DP algorithm see McMahan et al. (2017)
https://arxiv.org/abs/1710.06963. The adaptive clipping uses the geometric
method described in Thakkar et al. (2019) https://arxiv.org/abs/1905.03871.
The adaptive clipping parameters have been chosen to yield a process that
starts small and adapts relatively quickly to the median, without using
much of the privacy budget. This works well on most problems.
Args:
noise_multiplier: A float specifying the noise multiplier for the Gaussian
mechanism for model updates. A value of 1.0 or higher may be needed for
strong privacy. See above mentioned papers to compute (epsilon, delta)
privacy guarantee. Note that this is the effective total noise
multiplier, accounting for the privacy loss due to adaptive clipping.
The noise actually added to the aggregated values will be slightly
higher.
clients_per_round: A float specifying the expected number of clients per
round. Must be positive.
initial_l2_norm_clip: The initial value of the adaptive clipping norm.
target_unclipped_quantile: The quantile to which the clipping norm should
adapt.
learning_rate: The learning rate for the adaptive clipping process.
clipped_count_stddev: The stddev of the noise added to the clipped counts
in the adaptive clipping algorithm. If None, defaults to `0.05 *
clients_per_round` (unless `noise_multiplier` is 0, in which case it is
also 0).
Returns:
A `DifferentiallyPrivateFactory` with adaptive clipping and Gaussian
noise.
"""
if isinstance(clients_per_round, int):
clients_per_round = float(clients_per_round)
_check_float_nonnegative(noise_multiplier, 'noise_multiplier')
_check_float_positive(clients_per_round, 'clients_per_round')
_check_float_positive(initial_l2_norm_clip, 'initial_l2_norm_clip')
_check_float_probability(target_unclipped_quantile,
'target_unclipped_quantile')
_check_float_nonnegative(learning_rate, 'learning_rate')
if clipped_count_stddev is not None:
_check_float_nonnegative(clipped_count_stddev, 'clipped_count_stddev')
value_noise_multiplier, clipped_count_stddev = adaptive_clip_noise_params(
noise_multiplier, clients_per_round, clipped_count_stddev)
query = tfp.QuantileAdaptiveClipSumQuery(
initial_l2_norm_clip=initial_l2_norm_clip,
noise_multiplier=value_noise_multiplier,
target_unclipped_quantile=target_unclipped_quantile,
learning_rate=learning_rate,
clipped_count_stddev=clipped_count_stddev,
expected_num_records=clients_per_round,
geometric_update=True)
query = tfp.NormalizedQuery(query, denominator=clients_per_round)
return cls(query)
@classmethod
def gaussian_fixed(cls, noise_multiplier: float, clients_per_round: float,
clip: float) -> factory.UnweightedAggregationFactory:
"""`DifferentiallyPrivateFactory` with fixed clipping and Gaussian noise.
Performs fixed clipping and addition of Gaussian noise for differentially
private learning. For details of the DP algorithm see McMahan et al. (2017)
https://arxiv.org/abs/1710.06963.
Args:
noise_multiplier: A float specifying the noise multiplier for the Gaussian
mechanism for model updates. A value of 1.0 or higher may be needed for
strong privacy. See above mentioned paper to compute (epsilon, delta)
privacy guarantee.
clients_per_round: A float specifying the expected number of clients per
round. Must be positive.
clip: The value of the clipping norm.
Returns:
A `DifferentiallyPrivateFactory` with fixed clipping and Gaussian noise.
"""
if isinstance(clients_per_round, int):
clients_per_round = float(clients_per_round)
_check_float_nonnegative(noise_multiplier, 'noise_multiplier')
_check_float_positive(clients_per_round, 'clients_per_round')
_check_float_positive(clip, 'clip')
query = tfp.NormalizedQuery(
tfp.GaussianSumQuery(l2_norm_clip=clip, stddev=clip * noise_multiplier),
denominator=clients_per_round)
return cls(query)
@classmethod
def tree_aggregation(
cls,
noise_multiplier: float,
clients_per_round: float,
l2_norm_clip: float,
record_specs: Collection[tf.TensorSpec],
noise_seed: Optional[int] = None,
use_efficient: bool = True,
) -> factory.UnweightedAggregationFactory:
"""`DifferentiallyPrivateFactory` with tree aggregation noise.
Performs per-client clipping, averages client records, and adds noise for
differential privacy. The noise is generated via tree aggregation of the
cumulative sum over rounds, taking the residual between the current round
and the previous round. Combined with an SGD optimizer on the server, this
aggregator can be used to implement the DP-FTRL algorithm in
"Practical and Private (Deep) Learning without Sampling or Shuffling"
(https://arxiv.org/abs/2103.00039).
The standard deviation of the Gaussian noise added at each tree node is
`l2_norm_clip * noise_multiplier`. Note that noise is added during summation
of client model updates per round, *before* normalization (the noise will be
scaled down when dividing by `clients_per_round`). Thus `noise_multiplier`
can be used to compute the (epsilon, delta) privacy guarantee as described
in the paper.
Args:
noise_multiplier: Noise multiplier for the Gaussian noise in tree
aggregation. Must be non-negative, zero means no noise is applied.
clients_per_round: A positive number specifying the expected number of
clients per round.
l2_norm_clip: The value of the clipping norm. Must be positive.
record_specs: The specs of client results to be aggregated.
noise_seed: Random seed for the Gaussian noise generator. If `None`, a
nondeterministic seed based on system time will be generated.
use_efficient: If true, use the efficient tree aggregation algorithm based
on the paper "Efficient Use of Differentially Private Binary Trees".
Returns:
A `DifferentiallyPrivateFactory` with Gaussian noise by tree aggregation.
"""
if isinstance(clients_per_round, int):
clients_per_round = float(clients_per_round)
_check_float_nonnegative(noise_multiplier, 'noise_multiplier')
_check_float_positive(clients_per_round, 'clients_per_round')
_check_float_positive(l2_norm_clip, 'l2_norm_clip')
sum_query = tfp.TreeResidualSumQuery.build_l2_gaussian_query(
l2_norm_clip,
noise_multiplier,
record_specs,
noise_seed=noise_seed,
use_efficient=use_efficient)
mean_query = tfp.NormalizedQuery(sum_query, denominator=clients_per_round)
return cls(mean_query)
def __init__(self,
query: tfp.DPQuery,
record_aggregation_factory: Optional[
factory.UnweightedAggregationFactory] = None):
"""Initializes `DifferentiallyPrivateFactory`.
Args:
query: A `tfp.SumAggregationDPQuery` to perform private estimation.
record_aggregation_factory: A
`tff.aggregators.UnweightedAggregationFactory` to aggregate values after
preprocessing by the `query`. If `None`, defaults to
`tff.aggregators.SumFactory`. The provided factory is assumed to
implement a sum, and to have the property that it does not increase the
sensitivity of the query - typically this means that it should not
increase the l2 norm of the records when aggregating.
Raises:
TypeError: If `query` is not an instance of `tfp.SumAggregationDPQuery` or
`record_aggregation_factory` is not an instance of
`tff.aggregators.UnweightedAggregationFactory`.
"""
py_typecheck.check_type(query, tfp.SumAggregationDPQuery)
self._query = query
if record_aggregation_factory is None:
record_aggregation_factory = sum_factory.SumFactory()
py_typecheck.check_type(record_aggregation_factory,
factory.UnweightedAggregationFactory)
self._record_aggregation_factory = record_aggregation_factory
def create(
self,
value_type: factory.ValueType) -> aggregation_process.AggregationProcess:
py_typecheck.check_type(value_type, factory.ValueType.__args__)
query_initial_state_fn = computations.tf_computation(
self._query.initial_global_state)
query_state_type = query_initial_state_fn.type_signature.result
derive_sample_params = computations.tf_computation(
self._query.derive_sample_params, query_state_type)
get_query_record = computations.tf_computation(
self._query.preprocess_record,
derive_sample_params.type_signature.result, value_type)
query_record_type = get_query_record.type_signature.result
record_agg_process = self._record_aggregation_factory.create(
query_record_type)
agg_output_type = record_agg_process.next.type_signature.result.result.member
get_noised_result = computations.tf_computation(
self._query.get_noised_result, agg_output_type, query_state_type)
derive_metrics = computations.tf_computation(self._query.derive_metrics,
query_state_type)
@computations.federated_computation()
def init_fn():
return intrinsics.federated_zip(
(intrinsics.federated_eval(query_initial_state_fn, placements.SERVER),
record_agg_process.initialize()))
@computations.federated_computation(init_fn.type_signature.result,
computation_types.FederatedType(
value_type, placements.CLIENTS))
def next_fn(state, value):
query_state, agg_state = state
params = intrinsics.federated_broadcast(
intrinsics.federated_map(derive_sample_params, query_state))
record = intrinsics.federated_map(get_query_record, (params, value))
record_agg_output = record_agg_process.next(agg_state, record)
result, new_query_state, _ = intrinsics.federated_map(
get_noised_result, (record_agg_output.result, query_state))
query_metrics = intrinsics.federated_map(derive_metrics, new_query_state)
new_state = (new_query_state, record_agg_output.state)
measurements = collections.OrderedDict(
dp_query_metrics=query_metrics, dp=record_agg_output.measurements)
return measured_process.MeasuredProcessOutput(
intrinsics.federated_zip(new_state), result,
intrinsics.federated_zip(measurements))
return aggregation_process.AggregationProcess(init_fn, next_fn)
def _check_float_positive(value, label):
py_typecheck.check_type(value, float, label)
if value <= 0:
raise ValueError(f'{label} must be positive. Found {value}.')
def _check_float_nonnegative(value, label):
py_typecheck.check_type(value, float, label)
if value < 0:
raise ValueError(f'{label} must be nonnegative. Found {value}.')
def _check_float_probability(value, label):
py_typecheck.check_type(value, float, label)
if not 0 <= value <= 1:
raise ValueError(f'{label} must be between 0 and 1 (inclusive). '
f'Found {value}.')
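# Illustrative sketch (not part of the library): building an aggregation
# process for a 10-element float32 value with the adaptive Gaussian factory.
# The shape and hyperparameters are placeholders, not recommendations.
def _example_build_aggregation_process():
    dp_factory = DifferentiallyPrivateFactory.gaussian_adaptive(
        noise_multiplier=1.0, clients_per_round=100.0)
    value_type = computation_types.TensorType(tf.float32, [10])
    # `create` returns a `tff.templates.AggregationProcess` whose `initialize`
    # and `next` computations can be plugged into a federated training loop.
    return dp_factory.create(value_type)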
|
|
"""This file is a fork from a MIT-licensed project named OpenAI Baselines:
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
"""
from collections import deque
import gym
import numpy as np
from gym import spaces
import chainerrl
try:
import cv2
cv2.ocl.setUseOpenCL(False)
_is_cv2_available = True
except Exception:
_is_cv2_available = False
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
"""Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(
1, self.noop_max + 1) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, info = self.env.step(self.noop_action)
if done or info.get('needs_reset', False):
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for envs that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, info = self.env.step(1)
if done or info.get('needs_reset', False):
self.env.reset(**kwargs)
obs, _, done, info = self.env.step(2)
if done or info.get('needs_reset', False):
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game end.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.needs_real_reset = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.needs_real_reset = done or info.get('needs_reset', False)
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in the lives == 0 condition for a few
# frames, so it's important to keep lives > 0 so that we only reset
# once the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.needs_real_reset:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros(
(2,) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done or info.get('needs_reset', False):
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, channel_order='hwc'):
"""Warp frames to 84x84 as done in the Nature paper and later work.
To use this wrapper, OpenCV-Python is required.
"""
if not _is_cv2_available:
raise RuntimeError('Cannot import cv2 module. Please install OpenCV-Python to use WarpFrame.') # NOQA
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
shape = {
'hwc': (self.height, self.width, 1),
'chw': (1, self.height, self.width),
}
self.observation_space = spaces.Box(
low=0, high=255,
shape=shape[channel_order], dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height),
interpolation=cv2.INTER_AREA)
return frame.reshape(self.observation_space.low.shape)
class FrameStack(gym.Wrapper):
def __init__(self, env, k, channel_order='hwc'):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
self.stack_axis = {'hwc': 2, 'chw': 0}[channel_order]
orig_obs_space = env.observation_space
low = np.repeat(orig_obs_space.low, k, axis=self.stack_axis)
high = np.repeat(orig_obs_space.high, k, axis=self.stack_axis)
self.observation_space = spaces.Box(
low=low, high=high, dtype=orig_obs_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames), stack_axis=self.stack_axis)
class ScaledFloatFrame(gym.ObservationWrapper):
"""Divide frame values by 255.0 and return them as np.float32.
In particular, when the original env.observation_space dtype is np.uint8,
this wrapper converts frame values into [0.0, 1.0] of dtype np.float32.
"""
def __init__(self, env):
assert isinstance(env.observation_space, spaces.Box)
gym.ObservationWrapper.__init__(self, env)
self.scale = 255.0
orig_obs_space = env.observation_space
self.observation_space = spaces.Box(
low=self.observation(orig_obs_space.low),
high=self.observation(orig_obs_space.high),
dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / self.scale
class LazyFrames(object):
"""Array-like object that lazily concat multiple frames.
This object ensures that common frames between the observations are only
stored once. It exists purely to optimize memory usage which can be huge
for DQN's 1M frames replay buffers.
This object should only be converted to numpy array before being passed to
the model.
You'd not believe how complex the previous solution was.
"""
def __init__(self, frames, stack_axis=2):
self.stack_axis = stack_axis
self._frames = frames
def __array__(self, dtype=None):
out = np.concatenate(self._frames, axis=self.stack_axis)
if dtype is not None:
out = out.astype(dtype)
return out
class FlickerFrame(gym.ObservationWrapper):
"""Stochastically flicker frames."""
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
def observation(self, observation):
if self.unwrapped.np_random.rand() < 0.5:
return np.zeros_like(observation)
else:
return observation
def make_atari(env_id, max_frames=30 * 60 * 60):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
assert isinstance(env, gym.wrappers.TimeLimit)
# Unwrap TimeLimit wrapper because we use our own time limits
env = env.env
if max_frames:
env = chainerrl.wrappers.ContinuingTimeLimit(
env, max_episode_steps=max_frames)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True,
frame_stack=True, scale=False, fire_reset=False,
channel_order='chw',
flicker=False,
):
"""Configure environment for DeepMind-style Atari."""
if episode_life:
env = EpisodicLifeEnv(env)
if fire_reset and 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env, channel_order=channel_order)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if flicker:
env = FlickerFrame(env)
if frame_stack:
env = FrameStack(env, 4, channel_order=channel_order)
return env
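# Illustrative sketch (assumes the standard Gym Atari id
# 'BreakoutNoFrameskip-v4' is available locally): composing the helpers above
# into a DeepMind-style preprocessed environment.
def _example_make_env():
    env = make_atari('BreakoutNoFrameskip-v4')
    env = wrap_deepmind(env, episode_life=True, clip_rewards=True,
                        frame_stack=True, channel_order='chw')
    obs = env.reset()
    # reset() returns a LazyFrames object of shape (4, 84, 84); convert it
    # explicitly before feeding it to a model.
    return np.asarray(obs, dtype=np.float32)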
|
|
'''An asynchronous multi-process `HTTP proxy server`_. It works for both
``http`` and ``https`` (tunneled) requests.
Managing Headers
=====================
It is possible to add middleware to manipulate the original request headers.
If the header middleware is
an empty list, the proxy passes requests and responses unmodified.
This is an implementation for a forward-proxy which can be used
to retrieve any type of source from the Internet.
To run the server::
python manage.py
A header middleware is a callable which receives the wsgi *environ* and
the list of request *headers*. By default the example uses:
.. autofunction:: x_forwarded_for
To run with different headers middleware create a new script and do::
from proxyserver.manage import server
if __name__ == '__main__':
server(headers_middleware=[...]).start()
Implementation
===========================
.. autoclass:: ProxyServerWsgiHandler
:members:
:member-order:
.. _`HTTP proxy server`: http://en.wikipedia.org/wiki/Proxy_server
'''
import logging
from functools import partial
import asyncio
import pulsar
from pulsar import HttpException, task
from pulsar.apps import wsgi, http
from pulsar.apps.http.plugins import noerror
from pulsar.utils.httpurl import Headers, ENCODE_BODY_METHODS
from pulsar.utils.log import LocalMixin, local_property
SERVER_SOFTWARE = 'Pulsar-proxy-server/%s' % pulsar.version
ENVIRON_HEADERS = ('content-type', 'content-length')
USER_AGENT = SERVER_SOFTWARE
logger = logging.getLogger('pulsar.proxyserver')
def x_forwarded_for(environ, headers):
'''Add *x-forwarded-for* header'''
headers.add_header('x-forwarded-for', environ['REMOTE_ADDR'])
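def add_via(environ, headers):
    '''A second headers-middleware sketch (illustrative, not enabled by
    default): advertise this proxy in a Via header. Pass it alongside
    :func:`x_forwarded_for`, e.g.
    ``server(headers_middleware=[x_forwarded_for, add_via]).start()``.'''
    headers.add_header('via', '1.1 %s' % SERVER_SOFTWARE)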
class ProxyServerWsgiHandler(LocalMixin):
'''WSGI middleware for an asynchronous proxy server.
To perform processing on headers you can pass a list of
``headers_middleware``.
A headers middleware is a callable which accepts two parameters, the wsgi
*environ* dictionary and the *headers* container.
'''
def __init__(self, headers_middleware=None):
self.headers_middleware = headers_middleware or []
@local_property
def http_client(self):
'''The :class:`.HttpClient` used by this proxy middleware for
accessing upstream resources'''
client = http.HttpClient(decompress=False, store_cookies=False)
client.headers.clear()
return client
def __call__(self, environ, start_response):
uri = environ['RAW_URI']
logger.debug('new request for %r' % uri)
if not uri or uri.startswith('/'): # No proper uri, raise 404
raise HttpException(status=404)
response = TunnelResponse(self, environ, start_response)
response.request()
return response.future
############################################################################
# RESPONSE OBJECTS
class TunnelResponse:
'''Base WSGI Response Iterator for the Proxy server
'''
def __init__(self, wsgi, environ, start_response):
self.wsgi = wsgi
self.environ = environ
self.start_response = start_response
self.future = asyncio.Future()
@task
def request(self):
'''Perform the Http request to the upstream server
'''
request_headers = self.request_headers()
environ = self.environ
method = environ['REQUEST_METHOD']
data = None
if method in ENCODE_BODY_METHODS:
data = DataIterator(self)
http = self.wsgi.http_client
try:
yield from http.request(method,
environ['RAW_URI'],
data=data,
headers=request_headers,
version=environ['SERVER_PROTOCOL'],
pre_request=self.pre_request)
except Exception as exc:
self.error(exc)
def request_headers(self):
'''Fill request headers from the environ dictionary and
modify them via the list of :attr:`headers_middleware`.
The returned headers will be sent to the target uri.
'''
headers = Headers(kind='client')
for k in self.environ:
if k.startswith('HTTP_'):
head = k[5:].replace('_', '-')
headers[head] = self.environ[k]
for head in ENVIRON_HEADERS:
k = head.replace('-', '_').upper()
v = self.environ.get(k)
if v:
headers[head] = v
for middleware in self.wsgi.headers_middleware:
middleware(self.environ, headers)
return headers
def error(self, exc):
if self.future.done():
self.future.set_exception(exc)
else:
logger.error(str(exc))
@noerror
def pre_request(self, response, exc=None):
'''Start the tunnel.
This is a callback fired once a connection with upstream server is
established.
'''
if response.request.method == 'CONNECT':
# proxy - server connection
upstream = response.connection
# client - proxy connection
dostream = self.environ['pulsar.connection']
# Upgrade downstream connection
dostream.upgrade(partial(StreamTunnel, upstream))
# Upgrade upstream connection
upstream.upgrade(partial(StreamTunnel, dostream))
self.start_response('200 Connection established', [])
# send empty byte so that headers are sent
self.future.set_result([b''])
response.abort_request()
else:
response.bind_event('data_processed', self.data_processed)
response.bind_event('post_request', self.post_request)
def data_processed(self, response, data=None, **kw):
self.environ['pulsar.connection'].write(data)
def post_request(self, _, exc=None):
self.future.set_exception(wsgi.AbortWsgi())
class DataIterator:
def __init__(self, response):
self.response = response
self.stream = response.environ.get('wsgi.input')
def __iter__(self):
yield self.stream.reader.read()
class StreamTunnel(pulsar.ProtocolConsumer):
''':class:`.ProtocolConsumer` handling encrypted messages from
downstream client and upstream server.
This consumer is created as an upgrade of the standard Http protocol
consumer.
.. attribute:: tunnel
Connection to the downstream client or upstream server.
'''
headers = None
status_code = None
http_request = None
def __init__(self, tunnel, loop=None):
super().__init__(loop)
self.tunnel = tunnel
def connection_made(self, connection):
self.logger.debug('Tunnel connection %s made', connection)
connection.bind_event('connection_lost', self._close_tunnel)
if self.http_request:
self.start(self.http_request)
def data_received(self, data):
try:
return self.tunnel.write(data)
except Exception:
if not self.tunnel.closed:
raise
def _close_tunnel(self, arg, exc=None):
if not self.tunnel.closed:
self._loop.call_soon(self.tunnel.close)
def server(name='proxy-server', headers_middleware=None,
server_software=None, **kwargs):
'''Create a WSGI proxy server.'''
if headers_middleware is None:
headers_middleware = [x_forwarded_for]
wsgi_proxy = ProxyServerWsgiHandler(headers_middleware)
kwargs['server_software'] = server_software or SERVER_SOFTWARE
return wsgi.WSGIServer(wsgi_proxy, name=name, **kwargs)
if __name__ == '__main__':
server().start()
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Salactus, eater of s3 buckets.
"""
from __future__ import print_function
from collections import Counter
import csv
import functools
import json
import logging
import operator
import click
from rq.registry import FinishedJobRegistry, StartedJobRegistry
from rq.queue import Queue
from rq.worker import Worker
from c7n_salactus import worker, db
def debug(f):
def _f(*args, **kw):
try:
f(*args, **kw)
except (SystemExit, KeyboardInterrupt) as e:
raise
except:
import traceback, sys, pdb
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
functools.update_wrapper(_f, f)
return _f
@click.group()
def cli():
"""Salactus, eater of s3 buckets"""
@cli.command()
@click.option('--config', help='config file for accounts/buckets')
@click.option('--tag', help='filter accounts by tag')
@click.option('--account', '-a',
help='scan only the given accounts', multiple=True)
@click.option('--bucket', '-b',
help='scan only the given buckets', multiple=True)
@click.option('--debug', is_flag=True,
help='synchronous scanning, no workers')
def run(config, tag, bucket, account, debug=False):
"""Run across a set of accounts and buckets."""
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger('botocore').setLevel(level=logging.WARNING)
if debug:
def invoke(f, *args, **kw):
            if f.__name__ == 'process_keyset':
print("skip keyset")
return
return f(*args, **kw)
worker.invoke = invoke
with open(config) as fh:
data = json.load(fh)
for account_info in data:
if tag and tag not in account_info.get('tags', ()):
continue
if account and account_info['name'] not in account:
continue
if bucket:
account_info['buckets'] = bucket
worker.invoke(worker.process_account, account_info)
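# The --config file is a JSON list of account entries. run() only inspects
# the 'name', 'tags' and 'buckets' keys; everything else is passed through
# unchanged to worker.process_account. A minimal sketch (the 'role' key is
# an assumption about what the worker expects, not something used above):
#
#   [
#     {"name": "dev-account",
#      "tags": ["dev"],
#      "role": "arn:aws:iam::111111111111:role/salactus",
#      "buckets": ["bucket-one", "bucket-two"]}
#   ]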
@cli.command()
@click.option('--dbpath', help='path to json file')
def save(dbpath):
"""Save the current state to a json file
"""
d = db.db()
d.save(dbpath)
@cli.command()
@click.option('--dbpath', help='path to json file')
def reset(dbpath):
"""Save the current state to a json file
"""
click.echo('Delete db? Are you Sure? [yn] ', nl=False)
c = click.getchar()
click.echo()
if c == 'y':
click.echo('Wiping database')
worker.connection.flushdb()
elif c == 'n':
click.echo('Abort!')
else:
click.echo('Invalid input :(')
@cli.command()
def workers():
counter = Counter()
for w in Worker.all(connection=worker.connection):
for q in w.queues:
counter[q.name] += 1
import pprint
pprint.pprint(dict(counter))
@cli.command()
@click.option('--dbpath', '-f', help='json stats db')
@click.option('--account', '-a',
help="stats on a particular account", multiple=True)
def accounts(dbpath, account):
"""Report on stats by account"""
d = db.db(dbpath)
def _repr(a):
return "name:%s, matched:%d percent:%0.2f scanned:%d size:%d buckets:%d" % (
a.name,
a.matched,
a.percent_scanned,
a.scanned,
a.size,
len(a.buckets))
for a in sorted(d.accounts(), key=operator.attrgetter('name')):
click.echo(_repr(a))
def format_plain(buckets, fh):
def _repr(b):
return (
"account:%s name:%s percent:%0.2f matched:%d "
"scanned:%d size:%d kdenied:%d errors:%d partitions:%d") % (
b.account,
b.name,
b.percent_scanned,
b.matched,
b.scanned,
b.size,
b.keys_denied,
b.error_count,
b.partitions)
for b in buckets:
print(_repr(b), file=fh)
def format_csv(buckets, fh):
field_names = ['account', 'name', 'matched', 'scanned',
'size', 'keys_denied', 'error_count', 'partitions']
totals = Counter()
skip = set(('account', 'name', 'percent'))
for b in buckets:
for n in field_names:
if n in skip:
continue
totals[n] += getattr(b, n)
totals['account'] = 'Total'
totals['name'] = ''
writer = csv.DictWriter(fh, fieldnames=field_names, extrasaction='ignore')
writer.writerow(dict(zip(field_names, field_names)))
writer.writerow(totals)
for b in buckets:
bd = {n: getattr(b, n) for n in field_names}
writer.writerow(bd)
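# format_csv writes a header row, a 'Total' summary row, then one row per
# bucket. A sketch of the output (the values are illustrative only):
#
#   account,name,matched,scanned,size,keys_denied,error_count,partitions
#   Total,,12,3400,5100,2,1,8
#   dev,my-bucket,12,3400,5100,2,1,8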
@cli.command()
@click.option('--dbpath', '-f', help="json stats db")
@click.option('--output', '-o', type=click.File('wb'), default='-',
help="file to to output to (default stdout)")
@click.option('--format', help="format for output",
type=click.Choice(['plain', 'csv']), default='plain')
@click.option('--bucket', '-b',
help="stats on a particular bucket", multiple=True)
@click.option('--account', '-a',
help="stats on a particular account", multiple=True)
@click.option('--matched', is_flag=True,
help="filter to buckets with matches")
@click.option('--kdenied', is_flag=True,
help="filter to buckets w/ denied key access")
@click.option('--denied', is_flag=True,
help="filter to buckets denied access")
@click.option('--errors', is_flag=True,
help="filter to buckets with errors")
@click.option('--size', type=int,
help="filter to buckets with at least size")
def buckets(bucket=None, account=None, matched=False, kdenied=False,
errors=False, dbpath=None, size=None, denied=False,
format=None, output=None):
"""Report on stats by bucket"""
d = db.db(dbpath)
buckets = []
for b in sorted(d.buckets(account),
key=operator.attrgetter('bucket_id')):
if bucket and b.name not in bucket:
continue
if matched and not b.matched:
continue
if kdenied and not b.keys_denied:
continue
if errors and not b.errors:
continue
if size and b.size < size:
continue
if denied and not b.denied:
continue
buckets.append(b)
    formatter = format_csv if format == 'csv' else format_plain
formatter(buckets, output)
@cli.command()
def queues():
"""Reeport on progress by queues."""
conn = worker.connection
failure_q = None
def _repr(q):
return "running:%d pending:%d finished:%d" % (
StartedJobRegistry(q.name, conn).count,
q.count,
FinishedJobRegistry(q.name, conn).count)
for q in Queue.all(conn):
if q.name == 'failed':
failure_q = q
continue
click.echo("%s %s" % (q.name, _repr(q)))
if failure_q:
click.echo(
click.style(failure_q.name, fg='red') + ' %s' % _repr(failure_q))
@cli.command()
def failures():
"""Show any unexpected failures"""
q = Queue('failed', connection=worker.connection)
for i in q.get_jobs():
click.echo("%s on %s" % (i.func_name, i.origin))
click.echo(i.exc_info)
if __name__ == '__main__':
cli()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Subuser'
db.create_table('blogs_subuser', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('blog', self.gf('django.db.models.fields.related.ForeignKey')(related_name='Blog_user', null=True, to=orm['blogs.Blog'])),
('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('password', self.gf('django.db.models.fields.CharField')(max_length=40)),
))
db.send_create_signal('blogs', ['Subuser'])
def backwards(self, orm):
# Deleting model 'Subuser'
db.delete_table('blogs_subuser')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blogs.blog': {
'Meta': {'object_name': 'Blog'},
'analytics_account': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contributors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogcontributor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'draft_notice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exclusion_end': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'exclusion_start': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'facebook_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'fb_page_access_token': ('django.db.models.fields.CharField', [], {'max_length': '260', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'has_artists': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'header_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bootblog': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'main_color': ('django.db.models.fields.CharField', [], {'default': "'#C4BDB2'", 'max_length': '10', 'blank': 'True'}),
'moderator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'pinterest_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Template']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'}),
'twitter_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'blogs.category': {
'Meta': {'object_name': 'Category'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_category'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'cat_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_caret': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_close': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_email': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_fb': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_left': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_pint': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_right': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_tw': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'default': "'#000000'", 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'parent_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_category'", 'null': 'True', 'to': "orm['blogs.Category']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Comment_author'", 'null': 'True', 'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'comment_status': ('django.db.models.fields.CharField', [], {'default': "'pe'", 'max_length': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.info_email': {
'Meta': {'object_name': 'Info_email'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
},
'blogs.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menu': {
'Meta': {'object_name': 'Menu'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_menu'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_main': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'external_link': ('django.db.models.fields.URLField', [], {'max_length': '140', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Menu']", 'null': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Page']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'blogs.model': {
'Meta': {'object_name': 'Model'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Custom_post'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'blogs.modeldata': {
'Meta': {'object_name': 'ModelData'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Model']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Unknown'", 'max_length': '140'})
},
'blogs.modelfield': {
'Meta': {'object_name': 'ModelField'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Model']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'post_type': ('django.db.models.fields.CharField', [], {'default': "'Text'", 'max_length': '40'}),
'rank': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '2'})
},
'blogs.modelfielddata': {
'Meta': {'object_name': 'ModelFieldData'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'longtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Model']", 'null': 'True'}),
'model_data': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.ModelData']", 'null': 'True'}),
'model_field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.ModelField']", 'null': 'True'}),
'nullboolean': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'onetofive': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'positiveinteger': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'relation': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'relation'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['blogs.ModelData']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.page': {
'Meta': {'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_page'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.post': {
'Meta': {'object_name': 'Post'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Model']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_25': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_26': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_27': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_28': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_29': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_30': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_31': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_32': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_33': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'soundcloud_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'soundcloud_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
'tag': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Tag']", 'null': 'True', 'blank': 'True'}),
'temp_tag_field': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'video': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_ogg': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'vimeo_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'vimeo_thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'blogs.rss': {
'Meta': {'object_name': 'Rss'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_rss'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'blogs.subscription': {
'Meta': {'object_name': 'Subscription'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'blogs.subuser': {
'Meta': {'object_name': 'Subuser'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_user'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'blogs.tag': {
'Meta': {'object_name': 'Tag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_tag'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.template': {
'Meta': {'object_name': 'Template'},
'archives': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'base': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'category': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'single': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'})
},
'blogs.translation': {
'Meta': {'object_name': 'Translation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blogs']
|
|
#!/usr/bin/env python
"""
Unittests for wtforms_appengine
To run the tests, use NoseGAE:
pip install nose nosegae
nosetests --with-gae --without-sandbox
"""
from __future__ import unicode_literals
# This needs to stay as the first import, it sets up paths.
from gaetest_common import DummyPostData, fill_authors, DBTestCase
from unittest import TestCase
from google.appengine.ext import db
from wtforms import Form, fields as f, validators
from wtforms_appengine.db import model_form
from wtforms_appengine.fields import (
GeoPtPropertyField, ReferencePropertyField,
StringListPropertyField, # IntegerListPropertyField
)
class Author(db.Model):
name = db.StringProperty(required=True)
city = db.StringProperty()
age = db.IntegerProperty(required=True)
is_admin = db.BooleanProperty(default=False)
class Book(db.Model):
author = db.ReferenceProperty(Author)
class AllPropertiesModel(db.Model):
"""Property names are ugly, yes."""
prop_string = db.StringProperty()
prop_byte_string = db.ByteStringProperty()
prop_boolean = db.BooleanProperty()
prop_integer = db.IntegerProperty()
prop_float = db.FloatProperty()
prop_date_time = db.DateTimeProperty()
prop_date = db.DateProperty()
prop_time = db.TimeProperty()
prop_list = db.ListProperty(int)
prop_string_list = db.StringListProperty()
prop_reference = db.ReferenceProperty()
    prop_self_reference = db.SelfReferenceProperty()
prop_user = db.UserProperty()
prop_blob = db.BlobProperty()
prop_text = db.TextProperty()
prop_category = db.CategoryProperty()
prop_link = db.LinkProperty()
prop_email = db.EmailProperty()
prop_geo_pt = db.GeoPtProperty()
prop_im = db.IMProperty()
prop_phone_number = db.PhoneNumberProperty()
prop_postal_address = db.PostalAddressProperty()
prop_rating = db.RatingProperty()
class DateTimeModel(db.Model):
prop_date_time_1 = db.DateTimeProperty()
prop_date_time_2 = db.DateTimeProperty(auto_now=True)
prop_date_time_3 = db.DateTimeProperty(auto_now_add=True)
prop_date_1 = db.DateProperty()
prop_date_2 = db.DateProperty(auto_now=True)
prop_date_3 = db.DateProperty(auto_now_add=True)
prop_time_1 = db.TimeProperty()
prop_time_2 = db.TimeProperty(auto_now=True)
prop_time_3 = db.TimeProperty(auto_now_add=True)
class TestModelForm(DBTestCase):
nosegae_datastore_v3 = True
def test_model_form_basic(self):
form_class = model_form(Author)
self.assertEqual(hasattr(form_class, 'name'), True)
self.assertEqual(hasattr(form_class, 'age'), True)
self.assertEqual(hasattr(form_class, 'city'), True)
self.assertEqual(hasattr(form_class, 'is_admin'), True)
form = form_class()
self.assertEqual(isinstance(form.name, f.TextField), True)
self.assertEqual(isinstance(form.city, f.TextField), True)
self.assertEqual(isinstance(form.age, f.IntegerField), True)
self.assertEqual(isinstance(form.is_admin, f.BooleanField), True)
def test_required_field(self):
form_class = model_form(Author)
form = form_class()
self.assertEqual(form.name.flags.required, True)
self.assertEqual(form.city.flags.required, False)
self.assertEqual(form.age.flags.required, True)
self.assertEqual(form.is_admin.flags.required, False)
def test_default_value(self):
form_class = model_form(Author)
form = form_class()
self.assertEqual(form.name.default, None)
self.assertEqual(form.city.default, None)
self.assertEqual(form.age.default, None)
self.assertEqual(form.is_admin.default, False)
def test_model_form_only(self):
form_class = model_form(Author, only=['name', 'age'])
self.assertEqual(hasattr(form_class, 'name'), True)
self.assertEqual(hasattr(form_class, 'city'), False)
self.assertEqual(hasattr(form_class, 'age'), True)
self.assertEqual(hasattr(form_class, 'is_admin'), False)
form = form_class()
self.assertEqual(isinstance(form.name, f.TextField), True)
self.assertEqual(isinstance(form.age, f.IntegerField), True)
def test_model_form_exclude(self):
form_class = model_form(Author, exclude=['is_admin'])
self.assertEqual(hasattr(form_class, 'name'), True)
self.assertEqual(hasattr(form_class, 'city'), True)
self.assertEqual(hasattr(form_class, 'age'), True)
self.assertEqual(hasattr(form_class, 'is_admin'), False)
form = form_class()
self.assertEqual(isinstance(form.name, f.TextField), True)
self.assertEqual(isinstance(form.city, f.TextField), True)
self.assertEqual(isinstance(form.age, f.IntegerField), True)
def test_datetime_model(self):
"""Fields marked as auto_add / auto_add_now should not be included."""
form_class = model_form(DateTimeModel)
self.assertEqual(hasattr(form_class, 'prop_date_time_1'), True)
self.assertEqual(hasattr(form_class, 'prop_date_time_2'), False)
self.assertEqual(hasattr(form_class, 'prop_date_time_3'), False)
self.assertEqual(hasattr(form_class, 'prop_date_1'), True)
self.assertEqual(hasattr(form_class, 'prop_date_2'), False)
self.assertEqual(hasattr(form_class, 'prop_date_3'), False)
self.assertEqual(hasattr(form_class, 'prop_time_1'), True)
self.assertEqual(hasattr(form_class, 'prop_time_2'), False)
self.assertEqual(hasattr(form_class, 'prop_time_3'), False)
def test_not_implemented_properties(self):
# This should not raise NotImplementedError.
form_class = model_form(AllPropertiesModel)
# These should be set.
self.assertEqual(hasattr(form_class, 'prop_string'), True)
self.assertEqual(hasattr(form_class, 'prop_byte_string'), True)
self.assertEqual(hasattr(form_class, 'prop_boolean'), True)
self.assertEqual(hasattr(form_class, 'prop_integer'), True)
self.assertEqual(hasattr(form_class, 'prop_float'), True)
self.assertEqual(hasattr(form_class, 'prop_date_time'), True)
self.assertEqual(hasattr(form_class, 'prop_date'), True)
self.assertEqual(hasattr(form_class, 'prop_time'), True)
self.assertEqual(hasattr(form_class, 'prop_string_list'), True)
self.assertEqual(hasattr(form_class, 'prop_reference'), True)
        self.assertEqual(hasattr(form_class, 'prop_self_reference'), True)
self.assertEqual(hasattr(form_class, 'prop_blob'), True)
self.assertEqual(hasattr(form_class, 'prop_text'), True)
self.assertEqual(hasattr(form_class, 'prop_category'), True)
self.assertEqual(hasattr(form_class, 'prop_link'), True)
self.assertEqual(hasattr(form_class, 'prop_email'), True)
self.assertEqual(hasattr(form_class, 'prop_geo_pt'), True)
self.assertEqual(hasattr(form_class, 'prop_phone_number'), True)
self.assertEqual(hasattr(form_class, 'prop_postal_address'), True)
self.assertEqual(hasattr(form_class, 'prop_rating'), True)
# These should NOT be set.
self.assertEqual(hasattr(form_class, 'prop_list'), False)
self.assertEqual(hasattr(form_class, 'prop_user'), False)
self.assertEqual(hasattr(form_class, 'prop_im'), False)
def test_populate_form(self):
entity = Author(
key_name='test',
name='John',
city='Yukon',
age=25,
is_admin=True)
entity.put()
obj = Author.get_by_key_name('test')
form_class = model_form(Author)
form = form_class(obj=obj)
self.assertEqual(form.name.data, 'John')
self.assertEqual(form.city.data, 'Yukon')
self.assertEqual(form.age.data, 25)
self.assertEqual(form.is_admin.data, True)
def test_field_attributes(self):
form_class = model_form(Author, field_args={
'name': {
'label': 'Full name',
'description': 'Your name',
},
'age': {
'label': 'Age',
'validators': [validators.NumberRange(min=14, max=99)],
},
'city': {
'label': 'City',
'description': 'The city in which you live, not the one in'
' which you were born.',
},
'is_admin': {
'label': 'Administrative rights',
},
})
form = form_class()
self.assertEqual(form.name.label.text, 'Full name')
self.assertEqual(form.name.description, 'Your name')
self.assertEqual(form.age.label.text, 'Age')
self.assertEqual(form.city.label.text, 'City')
self.assertEqual(
form.city.description,
'The city in which you live, not the one in which you were born.')
self.assertEqual(form.is_admin.label.text, 'Administrative rights')
def test_reference_property(self):
keys = set(['__None'])
for name in ['foo', 'bar', 'baz']:
author = Author(name=name, age=26)
author.put()
keys.add(str(author.key()))
form_class = model_form(Book)
form = form_class()
for key, name, value in form.author.iter_choices():
assert key in keys
keys.remove(key)
assert not keys
class TestGeoFields(TestCase):
class GeoTestForm(Form):
geo = GeoPtPropertyField()
def test_geopt_property(self):
form = self.GeoTestForm(DummyPostData(geo='5.0, -7.0'))
self.assertTrue(form.validate())
self.assertEqual(form.geo.data, '5.0,-7.0')
form = self.GeoTestForm(DummyPostData(geo='5.0,-f'))
self.assertFalse(form.validate())
class TestReferencePropertyField(DBTestCase):
nosegae_datastore_v3 = True
def build_form(self, reference_class=Author, **kw):
class BookForm(Form):
author = ReferencePropertyField(
reference_class=reference_class,
**kw)
return BookForm
def author_expected(self, selected_index, get_label=lambda x: x.name):
expected = set()
for i, author in enumerate(self.authors):
expected.add((str(author.key()),
get_label(author),
i == selected_index))
return expected
def setUp(self):
super(TestReferencePropertyField, self).setUp()
self.authors = fill_authors(Author)
self.author_names = set(x.name for x in self.authors)
self.author_ages = set(x.age for x in self.authors)
def test_basic(self):
F = self.build_form(
get_label='name'
)
form = F()
self.assertEqual(
set(form.author.iter_choices()),
self.author_expected(None))
assert not form.validate()
form = F(DummyPostData(author=str(self.authors[0].key())))
assert form.validate()
self.assertEqual(
set(form.author.iter_choices()),
self.author_expected(0))
def test_not_in_query(self):
F = self.build_form()
new_author = Author(name='Jim', age=48)
new_author.put()
form = F(author=new_author)
form.author.query = Author.all().filter('name !=', 'Jim')
assert form.author.data is new_author
assert not form.validate()
def test_get_label_func(self):
get_age = lambda x: x.age
F = self.build_form(get_label=get_age)
form = F()
ages = set(x.label.text for x in form.author)
self.assertEqual(ages, self.author_ages)
def test_allow_blank(self):
F = self.build_form(allow_blank=True, get_label='name')
form = F(DummyPostData(author='__None'))
assert form.validate()
self.assertEqual(form.author.data, None)
expected = self.author_expected(None)
expected.add(('__None', '', True))
self.assertEqual(set(form.author.iter_choices()), expected)
class TestStringListPropertyField(TestCase):
class F(Form):
a = StringListPropertyField()
def test_basic(self):
form = self.F(DummyPostData(a='foo\nbar\nbaz'))
self.assertEqual(form.a.data, ['foo', 'bar', 'baz'])
self.assertEqual(form.a._value(), 'foo\nbar\nbaz')
|
|
import math
from PIL import Image, ImageDraw
def get_angle(p1, p2, p3):
"""
Calculates the angle between three points
https://en.wikipedia.org/wiki/Law_of_cosines#Applications
:param p1: center point
:type p1: tuple
:type p2: tuple
:type p3: tuple
:rtype: float
"""
f = point_distance
p12 = f(p1, p2)
p13 = f(p1, p3)
p23 = f(p2, p3)
if p12 == 0 or p13 == 0:
return math.acos(0)
result = (p12 ** 2 + p13 ** 2 - p23 ** 2) / (2 * p12 * p13)
return math.acos(result)
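# Worked example: for a right angle at p1,
#   get_angle((0, 0), (1, 0), (0, 1))
# gives p12 = p13 = 1 and p23 = sqrt(2), so the cosine is
# (1 + 1 - 2) / (2 * 1 * 1) = 0 and the result is acos(0) = pi / 2,
# i.e. 90 degrees after convert_to_degree().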
def convert_to_degree(radian):
return math.degrees(radian)
def point_distance(a, b):
"""
Calculates distance between two points
:rtype: float
"""
return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))
def get_control_points(coords, alpha):
"""
Returns list of control points that are created from coordinates.
Result list will be 2 * len(coords)
:param coords: list of coordinates
:param alpha: smooth factor
:rtype : list[tuple(2)]
"""
assert 0 < alpha < 1
cpoints = []
n = len(coords)
v = [(0, 0), list(coords[n - 1]), list(coords[0])]
mid = [[0, 0],
[(v[1][0] + v[2][0]) / 2.0, (v[1][1] + v[2][1]) / 2.0]]
vdist = [0, point_distance(v[1], v[2])]
anchor = [0, 0]
for i in range(n):
v[0] = v[1]
v[1] = v[2]
v[2] = coords[(i + 1) % n]
mid[0][0] = mid[1][0]
mid[0][1] = mid[1][1]
mid[1][0] = (v[1][0] + v[2][0]) / 2.0
mid[1][1] = (v[1][1] + v[2][1]) / 2.0
vdist[0] = vdist[1]
vdist[1] = point_distance(v[1], v[2])
p = vdist[0] / (vdist[0] + vdist[1])
anchor[0] = mid[0][0] + p * (mid[1][0] - mid[0][0])
anchor[1] = mid[0][1] + p * (mid[1][1] - mid[0][1])
xdelta = anchor[0] - v[1][0]
ydelta = anchor[1] - v[1][1]
c0 = (
alpha * (v[1][0] - mid[0][0] + xdelta) + mid[0][0] - xdelta,
alpha * (v[1][1] - mid[0][1] + ydelta) + mid[0][1] - ydelta)
c1 = (
alpha * (v[1][0] - mid[1][0] + xdelta) + mid[1][0] - xdelta,
alpha * (v[1][1] - mid[1][1] + ydelta) + mid[1][1] - ydelta)
cpoints.append([c0, c1])
return cpoints
def cubic_bezier(start, end, ctrl1, ctrl2, nv):
"""
Create bezier curve between start and end points
:param start: start anchor point
:param end: end anchor point
:param ctrl1: control point 1
:param ctrl2: control point 2
:param nv: number of points should be created between start and end
:return: list of smoothed points
"""
result = [start]
for i in range(nv - 1):
t = float(i) / (nv - 1)
tc = 1.0 - t
t0 = tc * tc * tc
t1 = 3.0 * tc * tc * t
t2 = 3.0 * tc * t * t
t3 = t * t * t
tsum = t0 + t1 + t2 + t3
x = (t0 * start[0] + t1 * ctrl1[0] + t2 * ctrl2[0] + t3 * end[0]) / tsum
y = (t0 * start[1] + t1 * ctrl1[1] + t2 * ctrl2[1] + t3 * end[1]) / tsum
result.append((x, y))
result.append(end)
return result
def line(p0, p1):
"""
Create line between two points based on Bresenham algorithm
"""
steep = False
x0 = p0[0]
y0 = p0[1]
x1 = p1[0]
y1 = p1[1]
if math.fabs(x0 - x1) < math.fabs(y0 - y1):
x0, y0 = y0, x0
x1, y1 = y1, x1
steep = True
if x0 > x1:
x0, x1 = x1, x0
y0, y1 = y1, y0
dx = x1 - x0
dy = y1 - y0
if dx == 0:
derror = 0.1
else:
derror = math.fabs(dy / dx)
error = 0.0
y = y0
x = x0
points = []
while x <= x1:
points.append((y, x) if steep else (x, y))
error += derror
if error > 0.5:
y += 1 if y1 > y0 else -1
error -= 1.
x += 1
return points
def smooth_points(coords, alpha, min_angle=45):
"""
Converts a list of points to polygon based on bezier curves
http://www.elvenprogrammer.org/projects/bezier/reference/
:param coords: list of coordinates
:param alpha: smooth factor
:return: point list of smoothed polygon
:rtype : list
"""
vertices_count = len(coords)
cpoints = get_control_points(coords, alpha)
points = []
i = 0
while i < vertices_count:
i_prev = (i - 1) % vertices_count
i_next = (i + 1) % vertices_count
i_next_2 = (i + 2) % vertices_count
p_current = coords[i]
p_prev = coords[i_prev]
p_next = coords[i_next]
p_next_2 = coords[i_next_2]
angle = convert_to_degree(get_angle(p_current, p_prev, p_next))
angle2 = convert_to_degree(get_angle(p_next, p_current, p_next_2))
if angle <= min_angle:
segment = line(p_current, p_next)
elif angle2 <= min_angle:
segment = line(p_current, p_next)
else:
segment = cubic_bezier(p_current, p_next,
cpoints[i][1], cpoints[i_next][0],
10)
points.extend(segment)
i += 1
return points
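# A small usage sketch (the coordinates are arbitrary): smooth a closed
# outline and rasterise it with PIL, much like __main() below does by hand.
#
#   im = Image.new('RGBA', (100, 100), (0, 0, 0, 0))
#   outline = [(10, 30), (20, 20), (30, 10), (50, 10), (50, 30), (30, 30)]
#   polygon = smooth_points(outline, alpha=0.5, min_angle=45)
#   ImageDraw.Draw(im).polygon(polygon, fill='red', outline='black')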
def __main():
print(line((0, 0), (5, 10)))
print(line((300, 100), (200, 250)))
im = Image.new('RGBA', (100, 100), (0, 0, 0, 0))
draw = ImageDraw.Draw(im)
coords = [(10, 30), (20, 20),
(30, 10), (50, 10),
(50, 30), (30, 30),
(10, 30)]
vertices_count = len(coords)
cpoints = get_control_points(coords, 0.5)
points = []
for i in range(vertices_count):
i_next = (i + 1) % vertices_count
segment = cubic_bezier(coords[i], coords[i_next],
cpoints[i][1], cpoints[i_next][0],
10)
points.extend(segment)
draw.polygon(points, fill='red', outline='black')
im.save('out2.png')
if __name__ == '__main__':
__main()
|
|
# Thierry Parmentelat -- INRIA
#
# a minimal library for writing "lightweight" SFA clients
#
# xxx todo
# this library should probably check for the expiration date of the various
# certificates and automatically retrieve fresh ones when expired
import sys
import os,os.path
import subprocess
from datetime import datetime
from sfa.util.xrn import Xrn
import sfa.util.sfalogging
# importing sfa.utils.faults does pull a lot of stuff
# OTOH it's imported from Certificate anyways, so..
from sfa.util.faults import RecordNotFound
from sfa.client.sfaserverproxy import SfaServerProxy
# see optimizing dependencies below
from sfa.trust.certificate import Keypair, Certificate
from sfa.trust.credential import Credential
from sfa.trust.gid import GID
##########
# a helper class to implement the bootstrapping of crypto material
# assuming we are starting from scratch on the client side
# what's needed to complete a full slice creation cycle
# (**) prerequisites:
# (*) a local private key
# (*) the corresp. public key in the registry
# (**) step1: a self-signed certificate
# default filename is <hrn>.sscert
# (**) step2: a user credential
# obtained at the registry with GetSelfCredential
# using the self-signed certificate as the SSL cert
# default filename is <hrn>.user.cred
# (**) step3: a registry-provided certificate (i.e. a GID)
# obtained at the registry using Resolve
# using the step2 credential as credential
# default filename is <hrn>.user.gid
#
# From that point on, the GID is used as the SSL certificate
# and the following can be done
#
# (**) retrieve a slice (or authority) credential
# obtained at the registry with GetCredential
# using the (step2) user-credential as credential
# default filename is <hrn>.<type>.cred
# (**) retrieve a slice (or authority) GID
# obtained at the registry with Resolve
# using the (step2) user-credential as credential
# default filename is <hrn>.<type>.gid
#
# (**) additionally, it might make sense to upgrade a GID file
# into a pkcs12 certificate usable in a browser
# this bundled format allows for embedding the private key
#
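# a minimal usage sketch (the hrn, registry URL and key path below are
# made up for illustration):
#
#   bootstrap = SfaClientBootstrap("onelab.inria.jdoe",
#                                  "https://registry.example.org:12345/",
#                                  dir="/home/jdoe/.sfi")
#   bootstrap.init_private_key_if_missing("/home/jdoe/.ssh/id_rsa")
#   gid_file = bootstrap.bootstrap_my_gid()
#   registry_proxy = bootstrap.server_proxy(bootstrap.registry_url)
#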
########## Implementation notes
#
# (*) decorators
#
# this implementation is designed as a guideline for
# porting to other languages
#
# the decision to go for decorators aims at focusing
# on the core of what needs to be done when everything
# works fine, and to take caching and error management
# out of the way
#
# for non-pythonic developers, it should be enough to
# implement the bulk of this code, namely the _produce methods
# and to add caching and error management by whichever means
# is available, including inline
#
# (*) self-signed certificates
#
# still with other languages in mind, we've tried to keep the
# dependencies to the rest of the code as low as possible
#
# however this still relies on the sfa.trust.certificate module
# for the initial generation of a self-signed-certificate that
# is associated to the user's ssh-key
# (for user-friendliness, and for smooth operations with planetlab,
# the usage model is to reuse an existing keypair)
#
# there might be a more portable, i.e. less language-dependent way, to
# implement this step by exec'ing the openssl command.
# a known successful attempt at this approach that worked
# for Java is documented below
# http://nam.ece.upatras.gr/fstoolkit/trac/wiki/JavaSFAClient
#
# (*) pkcs12
#
# the implementation of the pkcs12 wrapping, which is a late addition,
# is done through direct calls to openssl
#
####################
class SfaClientException (Exception): pass
class SfaClientBootstrap:
# dir is mandatory but defaults to '.'
def __init__ (self, user_hrn, registry_url, dir=None,
verbose=False, timeout=None, logger=None):
self.hrn=user_hrn
self.registry_url=registry_url
if dir is None: dir="."
self.dir=dir
self.verbose=verbose
self.timeout=timeout
# default for the logger is to use the global sfa logger
if logger is None:
logger = sfa.util.sfalogging.logger
self.logger=logger
######################################## *_produce methods
### step1
    # unconditionally create a self-signed certificate
def self_signed_cert_produce (self, output):
self.assert_private_key()
private_key_filename = self.private_key_filename()
keypair=Keypair(filename=private_key_filename)
self_signed = Certificate (subject = self.hrn)
self_signed.set_pubkey (keypair)
self_signed.set_issuer (keypair, self.hrn)
self_signed.sign ()
self_signed.save_to_file (output)
self.logger.debug("SfaClientBootstrap: Created self-signed certificate for %s in %s"%\
(self.hrn, output))
return output
### step2
    # unconditionally retrieve my credential (GetSelfCredential)
# we always use the self-signed-cert as the SSL cert
def my_credential_produce (self, output):
self.assert_self_signed_cert()
certificate_filename = self.self_signed_cert_filename()
certificate_string = self.plain_read (certificate_filename)
self.assert_private_key()
registry_proxy = SfaServerProxy (self.registry_url,
self.private_key_filename(),
certificate_filename)
try:
credential_string=registry_proxy.GetSelfCredential (certificate_string, self.hrn, "user")
except:
            # some urn/hrn forms may replace non-hierarchy delimiters '.' with an '_' instead of escaping the '.'
hrn = Xrn(self.hrn).get_hrn().replace('\.', '_')
credential_string=registry_proxy.GetSelfCredential (certificate_string, hrn, "user")
self.plain_write (output, credential_string)
self.logger.debug("SfaClientBootstrap: Wrote result of GetSelfCredential in %s"%output)
return output
### step3
    # unconditionally retrieve my GID - use the general form
def my_gid_produce (self,output):
return self.gid_produce (output, self.hrn, "user")
    ### retrieve any credential (GetCredential) - unconditional form
# we always use the GID as the SSL cert
def credential_produce (self, output, hrn, type):
self.assert_my_gid()
certificate_filename = self.my_gid_filename()
self.assert_private_key()
registry_proxy = SfaServerProxy (self.registry_url, self.private_key_filename(),
certificate_filename)
self.assert_my_credential()
my_credential_string = self.my_credential_string()
credential_string=registry_proxy.GetCredential (my_credential_string, hrn, type)
self.plain_write (output, credential_string)
self.logger.debug("SfaClientBootstrap: Wrote result of GetCredential in %s"%output)
return output
def slice_credential_produce (self, output, hrn):
return self.credential_produce (output, hrn, "slice")
def authority_credential_produce (self, output, hrn):
return self.credential_produce (output, hrn, "authority")
    ### retrieve any gid (Resolve) - unconditional form
# use my GID when available as the SSL cert, otherwise the self-signed
def gid_produce (self, output, hrn, type ):
try:
self.assert_my_gid()
certificate_filename = self.my_gid_filename()
except:
self.assert_self_signed_cert()
certificate_filename = self.self_signed_cert_filename()
self.assert_private_key()
registry_proxy = SfaServerProxy (self.registry_url, self.private_key_filename(),
certificate_filename)
credential_string=self.plain_read (self.my_credential())
records = registry_proxy.Resolve (hrn, credential_string)
records=[record for record in records if record['type']==type]
if not records:
raise RecordNotFound, "hrn %s (%s) unknown to registry %s"%(hrn,type,self.registry_url)
record=records[0]
self.plain_write (output, record['gid'])
self.logger.debug("SfaClientBootstrap: Wrote GID for %s (%s) in %s"% (hrn,type,output))
return output
# http://trac.myslice.info/wiki/MySlice/Developer/SFALogin
### produce a pkcs12 bundled certificate from GID and private key
# xxx the p12 export password is prompted for interactively below
# when leaving it empty on the mac, the result can't seem to be loaded in the keychain..
def my_pkcs12_produce (self, filename):
password=raw_input("Enter password for p12 certificate: ")
openssl_command=['openssl', 'pkcs12', "-export"]
openssl_command += [ "-password", "pass:%s"%password ]
openssl_command += [ "-inkey", self.private_key_filename()]
openssl_command += [ "-in", self.my_gid_filename()]
openssl_command += [ "-out", filename ]
if subprocess.call(openssl_command) ==0:
print "Successfully created %s"%filename
else:
print "Failed to create %s"%filename
# Returns True if the credential file is valid, otherwise returns False.
def validate_credential(self, filename):
valid = True
cred = Credential(filename=filename)
# check if the credential has expired
if cred.get_expiration() < datetime.utcnow():
valid = False
return valid
#################### public interface
# return my_gid, run all missing steps in the bootstrap sequence
def bootstrap_my_gid (self):
self.self_signed_cert()
self.my_credential()
return self.my_gid()
# once we've bootstrapped we can use this object to issue any other SFA call
# always use my gid
def server_proxy (self, url):
self.assert_my_gid()
return SfaServerProxy (url, self.private_key_filename(), self.my_gid_filename(),
verbose=self.verbose, timeout=self.timeout)
# now in some cases the self-signed is enough
def server_proxy_simple (self, url):
self.assert_self_signed_cert()
return SfaServerProxy (url, self.private_key_filename(), self.self_signed_cert_filename(),
verbose=self.verbose, timeout=self.timeout)
# this method can optionally be invoked to ensure proper
# installation of the private key that belongs to this user
# installs private_key in working dir with expected name -- preserve mode
# typically user_private_key would be ~/.ssh/id_rsa
# xxx should probably check the 2 files are identical
def init_private_key_if_missing (self, user_private_key):
private_key_filename=self.private_key_filename()
if not os.path.isfile (private_key_filename):
key=self.plain_read(user_private_key)
self.plain_write(private_key_filename, key)
os.chmod(private_key_filename,os.stat(user_private_key).st_mode)
self.logger.debug("SfaClientBootstrap: Copied private key from %s into %s"%\
(user_private_key,private_key_filename))
#################### private details
# stupid stuff
def fullpath (self, file): return os.path.join (self.dir,file)
# the expected filenames for the various pieces
def private_key_filename (self):
return self.fullpath ("%s.pkey" % Xrn.unescape(self.hrn))
def self_signed_cert_filename (self):
return self.fullpath ("%s.sscert"%self.hrn)
def my_credential_filename (self):
return self.credential_filename (self.hrn, "user")
# the tests use sfi -u <pi-user>; meaning that the slice credential filename
# needs to keep track of the user too
def credential_filename (self, hrn, type):
if type in ['user']:
basename="%s.%s.cred"%(hrn,type)
else:
basename="%s-%s.%s.cred"%(self.hrn,hrn,type)
return self.fullpath (basename)
def slice_credential_filename (self, hrn):
return self.credential_filename(hrn,'slice')
def authority_credential_filename (self, hrn):
return self.credential_filename(hrn,'authority')
def my_gid_filename (self):
return self.gid_filename (self.hrn, "user")
def gid_filename (self, hrn, type):
return self.fullpath ("%s.%s.gid"%(hrn,type))
def my_pkcs12_filename (self):
return self.fullpath ("%s.p12"%self.hrn)
# optimizing dependencies
# originally we used classes GID or Credential or Certificate
# like e.g.
# return Credential(filename=self.my_credential()).save_to_string()
# but in order to make it simpler for other implementations/languages..
def plain_read (self, filename):
infile=file(filename,"r")
result=infile.read()
infile.close()
return result
def plain_write (self, filename, contents):
outfile=file(filename,"w")
outfile.write(contents)
outfile.close()
def assert_filename (self, filename, kind):
if not os.path.isfile (filename):
raise IOError,"Missing %s file %s"%(kind,filename)
return True
def assert_private_key (self):
return self.assert_filename (self.private_key_filename(),"private key")
def assert_self_signed_cert (self):
return self.assert_filename (self.self_signed_cert_filename(),"self-signed certificate")
def assert_my_credential (self):
return self.assert_filename (self.my_credential_filename(),"user's credential")
def assert_my_gid (self):
return self.assert_filename (self.my_gid_filename(),"user's GID")
# decorator to make up the other methods
def get_or_produce (filename_method, produce_method, validate_method=None):
# when no validate_method is provided, an existing file is always considered valid
def wrap (f):
def wrapped (self, *args, **kw):
filename=filename_method (self, *args, **kw)
if os.path.isfile ( filename ):
if not validate_method:
return filename
elif validate_method(self, filename):
return filename
else:
# remove invalid file
self.logger.warning ("Removing %s - has expired"%filename)
os.unlink(filename)
try:
produce_method (self, filename, *args, **kw)
return filename
except IOError:
raise
except :
error = sys.exc_info()[:2]
message="Could not produce/retrieve %s (%s -- %s)"%\
(filename,error[0],error[1])
self.logger.log_exc(message)
raise Exception, message
return wrapped
return wrap
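# For instance, my_credential() below resolves to my_credential_filename();
# if that file is missing, or fails validate_credential(), it is (re)created
# via my_credential_produce(), and the filename is returned either way.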
@get_or_produce (self_signed_cert_filename, self_signed_cert_produce)
def self_signed_cert (self): pass
@get_or_produce (my_credential_filename, my_credential_produce, validate_credential)
def my_credential (self): pass
@get_or_produce (my_gid_filename, my_gid_produce)
def my_gid (self): pass
@get_or_produce (my_pkcs12_filename, my_pkcs12_produce)
def my_pkcs12 (self): pass
@get_or_produce (credential_filename, credential_produce, validate_credential)
def credential (self, hrn, type): pass
@get_or_produce (slice_credential_filename, slice_credential_produce, validate_credential)
def slice_credential (self, hrn): pass
@get_or_produce (authority_credential_filename, authority_credential_produce, validate_credential)
def authority_credential (self, hrn): pass
@get_or_produce (gid_filename, gid_produce)
def gid (self, hrn, type ): pass
# get the credentials as strings, for inserting as API arguments
def my_credential_string (self):
self.my_credential()
return self.plain_read(self.my_credential_filename())
def slice_credential_string (self, hrn):
self.slice_credential(hrn)
return self.plain_read(self.slice_credential_filename(hrn))
def authority_credential_string (self, hrn):
self.authority_credential(hrn)
return self.plain_read(self.authority_credential_filename(hrn))
# for consistency
def private_key (self):
self.assert_private_key()
return self.private_key_filename()
def delegate_credential_string (self, original_credential, to_hrn, to_type='authority'):
"""
sign a delegation credential to someone else
original_credential : typically one's user- or slice- credential, to be delegated to somebody else
to_hrn : the hrn of the person that will be allowed to do stuff on our behalf
to_type : goes with to_hrn, usually 'user' or 'authority'
returns a string with the delegated credential
this internally uses self.my_gid()
it also retrieves the gid for to_hrn/to_type
and uses Credential.delegate()"""
# the gid and hrn of the object we are delegating
if isinstance (original_credential, str):
original_credential = Credential (string=original_credential)
original_gid = original_credential.get_gid_object()
original_hrn = original_gid.get_hrn()
if not original_credential.get_privileges().get_all_delegate():
self.logger.error("delegate_credential_string: original credential %s does not have delegate bit set"%original_hrn)
return
# the delegating user's gid
my_gid = self.my_gid()
# retrieve the GID for the entity that we're delegating to
to_gidfile = self.gid (to_hrn,to_type)
# to_gid = GID ( to_gidfile )
# to_hrn = delegee_gid.get_hrn()
# print 'to_hrn',to_hrn
delegated_credential = original_credential.delegate(to_gidfile, self.private_key(), my_gid)
return delegated_credential.save_to_string(save_parents=True)
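# A minimal usage sketch (the hrn, URLs and key path are hypothetical; assumes
# the registry is reachable and the user's private key exists):
#
#   bootstrap = SfaClientBootstrap("plc.site.user", "http://registry.example.org:12345/", dir=".")
#   bootstrap.init_private_key_if_missing(os.path.expanduser("~/.ssh/id_rsa"))
#   bootstrap.bootstrap_my_gid()
#   registry = bootstrap.server_proxy("http://registry.example.org:12345/")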
|
|
"""Simple language detector
"""
import re
import math
import random
import unicodedata
from operator import itemgetter
from itertools import chain
from collections import defaultdict
def stream_sample(filename):
"""Streams over a dataset, iterates over language label and sample text"""
with open(filename) as fin:
for line in fin:
lang, text = line[:-1].decode('utf8').split('\t')
yield lang, text
class LanguageDetector(object):
"""Base class for a language detector
NOTE: do not use this class, use one of the subclasses.
"""
def train(self, samples):
raise NotImplementedError
def detect(self, text):
return 'xx', 0.0
def eval(self, samples):
"""Evaluate the model against a labelled set"""
tp = defaultdict(int)
fn = defaultdict(int)
fp = defaultdict(int)
languages = set()
mistakes = []
for label, text in samples:
languages.add(label)
lang_code, _ = self.detect(text)
if lang_code == label:
tp[label] += 1
else:
mistakes.append((text, label, lang_code))
fn[label] += 1
fp[lang_code] += 1
precision = {}
recall = {}
for lang in languages:
if tp[lang] + fp[lang] == 0:
precision[lang] = 0.0
else:
precision[lang] = tp[lang] / float(tp[lang] + fp[lang]) * 100.0
if tp[lang] + fn[lang] == 0:
recall[lang] = 0.0
else:
recall[lang] = tp[lang] / float(tp[lang] + fn[lang]) * 100.0
return precision, recall, mistakes
class RandomLanguageDetector(LanguageDetector):
"""Simple random classifier.
"""
def train(self, samples):
model = set()
for label, _ in samples:
model.add(label)
model.add('xx')
self._model = list(model)
def detect(self, text):
return random.choice(self._model), 1.0
class CosineLanguageDetector(LanguageDetector):
"""Cosine similarity based language classifier that uses single chars as features
"""
def _preprocess(self, text):
text = unicodedata.normalize('NFC', text)
#
# We could apply other classic text normalization here, such as
# lower-casing and punctuation removal:
#
# text = ' '.join(word_tokenizer(text))
# return text.lower()
#
return text
def _extract_features(self, text):
return list(self._preprocess(text))
def _normalize_vector(self, v):
norm = math.sqrt(sum(x*x for x in v.itervalues()))
for k in v:
v[k] /= norm
def train(self, samples):
extract_features = self._extract_features
model = defaultdict(lambda: defaultdict(float))
for label, text in samples:
features = extract_features(text)
for f in features:
model[label][f] += 1
for v in model.itervalues():
self._normalize_vector(v)
self._model = dict(model)
def detect(self, text):
features = self._extract_features(text)
u = defaultdict(float)
for f in features:
u[f] += 1
self._normalize_vector(u)
r = []
for l, v in self._model.iteritems():
score = 0.0
for f in u:
score += u[f] * v.get(f, 0.0)
r.append((l, score))
return max(r, key=itemgetter(1))
class BigramFeatureMixin(object):
def _extract_features(self, text):
text = self._preprocess(text)
return [text[i:i+2] for i in xrange(len(text)-1)]
class TrigramFeatureMixin(object):
def _extract_features(self, text):
text = self._preprocess(text)
return [text[i:i+3] for i in xrange(len(text)-2)]
class BigramCosineLanguageDetector(BigramFeatureMixin, CosineLanguageDetector):
"""Cosine similarity language classifier with bigrams as features"""
pass
class TrigramCosineLanguageDetector(TrigramFeatureMixin, CosineLanguageDetector):
"""Cosine similarity language classifier with trigrams as features"""
pass
word_tokenizer = re.compile('\w+', re.U).findall
class MultipleFeatureMixin(object):
weight = [1.0, 1.0, 1.0]
def _extract_features(self, text):
text = self._preprocess(text)
unigrams = list(text)
bigrams = [text[i:i+2] for i in xrange(len(text)-1)]
trigrams = [text[i:i+3] for i in xrange(len(text)-2)]
return [x for x in chain(unigrams, bigrams, trigrams) if not x.isdigit()]
def _normalize_vector(self, v):
# Normalize each feature group as if it were a separate vector; the
# resulting score is then the sum of the dot products of the feature groups.
# To apply different weights for each feature group you can change the
# `weight` vector.
norm = [0.0, 0.0, 0.0]
for k, x in v.iteritems():
norm[len(k)-1] += x*x
for i in range(len(norm)):
norm[i] = math.sqrt(norm[i]) * (1.0/math.sqrt(self.weight[i]))
for k in v:
v[k] /= norm[len(k)-1]
class MultiCosineLanguageDetector(MultipleFeatureMixin, CosineLanguageDetector):
"""Cosine similarity language classifier with multiple features
Uses the following features:
- single chars
- bigrams
- trigrams
"""
pass
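# A minimal training/evaluation sketch (the filename and split ratio are
# illustrative; assumes a tab-separated file of "<lang>\t<text>" lines):
#
#   samples = list(stream_sample('samples.tsv'))
#   random.shuffle(samples)
#   cut = int(len(samples) * 0.9)
#   detector = TrigramCosineLanguageDetector()
#   detector.train(samples[:cut])
#   precision, recall, mistakes = detector.eval(samples[cut:])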
|
|
'''
This file runs analysis on the vision output from buoys (or other things)
to determine where the buoy is most likely to be.
This outputs as the mission's tagged location the location where it is
most likely to find the buoy.
We keep a grid storing a top-down view of the area where we think the buoy is.
Vision results are used as evidence of where the buoy is likely to be found.
Positive results increase the likelihood in the region where we think the buoy is.
This region is approximated by the angle to the buoy in the screen and
a rough distance estimate from the size in pixels of the buoy.
Seeing a buoy other than the target buoy helps, too, since that is used to give
an 'inferred' position of the target buoy, through the use of visual estimates
of buoy relative positions. These estimates (in Configuration) need only be
fairly rough (on the order of half a meter accuracy) to still be useful.
Not seeing the target buoy is also used as evidence, but in this case
it decreases the probability of finding the buoy in the region where it
could be seen. That is, in a region bounded by the visible angles of the cameras
and by the distances at which we could see the buoy.
More specifically, this program performs Bayesian updates of the probability
distribution function (approximated in a grid) through use of evidence from
the vision system. We compute the probability of the buoy being at
any given location by Bayes theorem and an estimated distribution of
the likelihood that the reading would be observed if the buoy were at that
location.
This gives a structured means of incorporating a wide variety of data.
This program uses approximate knowledge of:
-relative buoy positions from visual observations
-Sub movements from DVL readings
-Vision success or failure in any of the three buoy colors
'''
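# In outline, for each grid cell x and vision reading r the update is Bayes'
# rule, p(x | r) proportional to p(r | x) * p(x), with p(r | x) approximated
# here by the in_weight/out_weight region weighting passed to Locator.update().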
####Configuration
POSITIVE_WEIGHT = 0.80 #For same-color evidence (eg: seeing orange, looking for orange)
POSITIVE_WIDTH = 6 # inversely proportional to width of region for positive updates
INFERRED_WEIGHT = 0.52 #For off-color evidence (eg: seeing green, looking for orange)
INFERRED_WIDTH = 3.5 # inversely proportional to width of region for inferred updates
NEGATIVE_WEIGHT = 0.1 # Gives little weight to not-observing
NEGATIVE_WIDTH = 3 # Gives a very wide region for not-observing
NEGATIVE_MIN_DIST = 0.25 # Closest point at which we can see a buoy
NEGATIVE_MAX_DIST = 3 # Furthest point at which we can see a buoy
#TODO: negative width should be determined to roughly match our FOV
#Size of buoy in meters
#used to estimate buoy distance
BUOY_RADIUS = 0.16
#Factors to give minimum and maximum distance from approximated distance
MIN_DIST_FACTOR = 0.8
MAX_DIST_FACTOR = 1.3
MIN_DIST_FACTOR_INFERRED = 0.4
MAX_DIST_FACTOR_INFERRED = 1.8
# Constants
SIZE = 400 #cell count
LENGTH = 5 # meters
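# With these values each grid cell covers LENGTH/SIZE = 5/400 = 0.0125 m,
# assuming LENGTH spans the whole grid handed to Locator below.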
####Locator Task
import numpy
import pylab
import math
import camera
import shm
from mission.tasks import vector
import time
import distance.calc_pos_to_objs as calc_pos
# Enable warning output
# NOTE: the resulting warnings appear innocuous but are noisy
import os
os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
# We might not have OpenCL installed
# If not, try the NumPy version instead
try:
#from locator_cl import LocatorCL as Locator
from locator_numpy import Locator
except ImportError, e:
print "Unable to import OpenCL, defaulting to NumPy implementation"
print "For instructions on installing OpenCL, see CUAUV Wiki page: Software/OpenCL"
from locator_numpy import Locator
north = shm.kalman.north
east = shm.kalman.east
hdg = shm.kalman.heading
buoy_x = {"orange" : shm.orange_results.center_x,
"green" : shm.green_results.center_x,
"yellow" : shm.yellow_results.center_x,
"led1" : shm.led_buoy_results.center_x,
"led2" : shm.led_buoy_results2.center_x}
buoy_y = {"orange" : shm.orange_results.center_y,
"green" : shm.green_results.center_y,
"yellow" : shm.yellow_results.center_y,
"led1" : shm.led_buoy_results.center_y,
"led2" : shm.led_buoy_results2.center_y}
buoy_prob = {"orange" : shm.orange_results.probability,
"green" : shm.green_results.probability,
"yellow" : shm.yellow_results.probability,
"led1" : shm.led_buoy_results.probability,
"led2" : shm.led_buoy_results2.probability}
buoy_area = {"orange" : shm.orange_results.area,
"green" : shm.green_results.area,
"yellow" : shm.yellow_results.area,
"led1" : shm.led_buoy_results.area,
"led2" : shm.led_buoy_results2.area}
buoy_watch_group = dict(orange=shm.orange_results,
green=shm.green_results,
yellow=shm.yellow_results,
led1=shm.led_buoy_results,
led2=shm.led_buoy_results2)
name = dict(orange="orange_buoy", # Used for calc_pos_to_objs
yellow="orange_buoy",
green="green_buoy",
led1="led_buoy1",
led2="led_buoy2")
buoy_colors = ["orange", "led1", "led2"] #["orange", "green", "yellow"]
camera_w = shm.camera.forward_width
camera_height = shm.camera.forward_height
def x_distance_to_target(target):
n,e = target
target_pos = vector.Vector(n,e)
current_pos = vector.Vector(north.get(), east.get())
heading = vector.FromAuvAngle(hdg.get())
to_target = target_pos - current_pos
forward = vector.ProjectOnto(to_target, heading)
return vector.Length(forward)
def weight_attenuate(weight, delta_time):
''' Determine how much weight to give to an update if the
previous update was delta_time ago.
Attempts to make it so that convergence is independent of the
frame-rate of the updates.'''
w = weight/(1-weight)
new_weight = w**delta_time/(1+w**delta_time)
return new_weight
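# e.g. with weight=0.8 the odds ratio w is 4.0; over delta_time=0.5 s this
# becomes 4.0**0.5 = 2.0, i.e. an attenuated weight of 2.0/3.0 ~= 0.67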
distance_calculator = calc_pos.PositionToObject()
class LocatorRunner(object):
'''
Create an object that on each update() call examines vision output
and vehicle state to update where a colored buoy most likely is.
'''
def __init__(self, target, orange_pos=None, green_pos=None, yellow_pos=None,
led_pos=None, prior_sigma=2., display=True,
locator_var=None, settings_var=None):
'''
target is the color of the buoy we are interested in
orange_pos, green_pos, yellow_pos are tuples of (north,east) coordinates
prior_sigma gives the variance of the initial guess. Should be something
like 1 (in meters) and can be gotten from mission layout system or just guessed.
Display determines whether to show output.
Warning: display is very slow and can affect results by slowing down the rate
of updates.
'''
self.display = display
self.output_var = locator_var
self.settings_var = settings_var
self.target = target
if locator_var is None:
raise Exception("locator_var required")
if settings_var is None:
raise Exception("settings_var required")
#Relative positions of orange, green, and yellow buoys
#in meters, displacement is arbitrary.
positions = dict(orange=orange_pos,
yellow=yellow_pos,
green=green_pos,
led1=led_pos,
led2=led_pos)
positions = dict( [(color, pos) for color,pos in positions.items()
if pos is not None] )
target_n, target_e = positions[self.target]
#Calculate the relative offsets
self.N_OFFSET = dict((color, target_n - n) for color, (n,e) in positions.items())
self.E_OFFSET = dict((color, target_e - e) for color, (n,e) in positions.items())
# The actual Locator
self.locator = Locator(target_n, target_e, LENGTH, SIZE, prior_sigma)
self.likely_depths = [] #list of depth guesses, locator probability
if self.display:
pylab.ion()
self.figure = pylab.figure()
import matplotlib.colors
norm = matplotlib.colors.LogNorm(1e-7,1)
self.img = pylab.imshow(self.locator.probabilities.reshape((SIZE,SIZE)),
norm=norm,
picker=True, origin="lower",
extent=(numpy.min(self.locator.easts),
numpy.max(self.locator.easts),
numpy.min(self.locator.norths),
numpy.max(self.locator.norths)))
self.img.set_interpolation("nearest")
#colorbar = pylab.colorbar()
#ticks =[1,1e-1,1e-2,1e-3,1e-4,1e-5,1e-6]
#colorbar.set_ticks(ticks)
#colorbar.set_ticklabels([str(x) for x in ticks])
self.sub_pos = pylab.plot([], [], "r-")[0] #Plot sub position over time as red line
self.current_pos = pylab.plot([],[], "ro")[0] #Plot of current position
self.output_pos = pylab.plot([], [], "go")[0] #Plot of the output point
# Draw layout positions of the items
for color, (n,e) in positions.items():
if not "led" in color: # stupid hack to avoid a problem... FIX
pylab.plot([e], [n], "^", color=color)
# Updates when there is new vision data
self.vision_watcher = shm.watchers.watcher()
for color in buoy_colors:
self.vision_watcher.watch(buoy_watch_group[color])
# Updates when as the sub moves
# Useful for updating sub position when drawing
# even when vision data isn't changing
tick_watcher = shm.watchers.watcher()
tick_watcher.watch(shm.kalman)
self.last_update = time.time()
# Clear any existing probabilities
self.output_var.probability.set(0)
def update(self):
'''
Updates the locator output in shared memory by examining vehicle and vision state
Update should be called regularly, for example on vehicle position updates
'''
heading = hdg.get()
#Get the sub's positions
curr_east, curr_north = east.get(), north.get()
if self.display:
#Draw the sub's position
xs, ys = self.sub_pos.get_data()
self.sub_pos.set_data(list(xs)+[curr_east],list(ys)+[curr_north])
self.current_pos.set_data( [curr_east], [curr_north] )
self.figure.canvas.draw()
# Don't perform updates if we haven't gotten new vision results
if not self.vision_watcher.has_changed():
if self.display:
pylab.draw()
return
delta_time = max(time.time() - self.last_update, 1)
# Perform updates for each buoy that we see
for color in buoy_colors:
if buoy_prob[color].get() > 0.5:
x,y = buoy_x[color].get(), buoy_y[color].get()
angle_x, angle_y = camera.screen_to_angle(x,y)
area = buoy_area[color].get()
if area == 0:
continue #Can't divide by zero!
try:
dist = distance_calculator.get_distance(name[color])
except ZeroDivisionError:
# Just skip this since
# area must have been zero
print "oops! got zero area"
continue
if self.target == color:
min_dist = dist*MIN_DIST_FACTOR
max_dist = dist*MAX_DIST_FACTOR
weight = POSITIVE_WEIGHT
weight = weight_attenuate(weight,delta_time)
width = POSITIVE_WIDTH
run = True
elif self.settings_var.use_inferred_updates.get():
min_dist = dist*MIN_DIST_FACTOR_INFERRED
max_dist = dist*MAX_DIST_FACTOR_INFERRED
weight = INFERRED_WEIGHT
weight = weight_attenuate(weight,delta_time)
width = INFERRED_WIDTH
run = True
else:
run = False
if run:
# Perform the actual update
self.locator.update( (curr_north+self.N_OFFSET[color],
curr_east+self.E_OFFSET[color]),
angle_x + heading,
min_dist, max_dist,
width = width,
in_weight = weight,
out_weight = 1-weight)
color = self.target
# Check if we don't see it
# note: only doing this for the color we're looking for
# we could be checking for not-seeing the other colors, too
# but that strikes me as wild and cavalier
if buoy_prob[color].get() < 0.1:
weight = NEGATIVE_WEIGHT
weight = weight_attenuate(weight,delta_time)
# We don't see the buoy, so we want a 'negative' update
self.locator.update( (curr_north+self.N_OFFSET[self.target],
curr_east+self.E_OFFSET[self.target]),
heading,
NEGATIVE_MIN_DIST, NEGATIVE_MAX_DIST,
NEGATIVE_WIDTH,
in_weight=weight,
out_weight=1-weight)
if self.display:
#Display
self.img.set_data(self.locator.probabilities.reshape((SIZE,SIZE)))
#Rescaling colors: (don't use if logarithmic plot enabled)
#img.set_clim( (locator.probabilities.min(), locator.probabilities.max()) )
#Tag the most likely position
north_out, east_out, prob_out = self.locator.get_max()
self.output_var.target_east.set(east_out)
self.output_var.target_north.set(north_out)
self.output_var.probability.set( prob_out )
self.last_update = time.time()
# This determines the position of the sub in the vertical direction
# The actual locator system has no means for determining depth, so
# this is a little ad hoc
#Update probable depth list based on y camera pos, pitch, and likely distance
if buoy_prob[color].get() > 0.5:
#buoy data is good
ypos = buoy_y[color].get()
FORWARD_V_FOV = math.degrees(camera.V_FOV)
xdist = x_distance_to_target((north_out, east_out))
cdepth = shm.kalman.depth.get()
cpitch = shm.kalman.pitch.get()
#TODO: technically, this math is wrong: angle is not proportional to height from the center
# example: if we had a very, very tall camera image, then 89 degrees would take up a huge part of
# the screen but 0 degrees would be normal sized
angl = (((shm.camera.forward_height.get()/2.0 - ypos) / shm.camera.forward_height.get()) *
FORWARD_V_FOV + cpitch)
ydist = xdist * math.tan(math.radians(angl))
buoy_depth = cdepth - ydist
self.likely_depths.append((buoy_depth, prob_out))
expected_depth = self.settings_var.expected_depth.get()
#Calculate final depth based on probable depth list
def get_likely_depth():
# Ignore really way-off depths
likely_depths = [(x,y) for (x,y) in self.likely_depths if abs(x-expected_depth) < 1.0]
if len(likely_depths) < 7: #TODO: Remove constant
return 0 #TODO: 0 indicates no depth data!
#Calculate final depth using depth guesses weighted based on their corresponding locator probabilities
prob_sum = sum(map(lambda (_,y): y, likely_depths))
final_depth = sum(map(lambda (x,y): x * (y / prob_sum), likely_depths))
final_depth = max( 0.35 , final_depth) #TODO: Move this constant
return final_depth
self.output_var.target_depth.set(get_likely_depth())
if self.display:
self.output_pos.set_data([east_out], [north_out])
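# A minimal driver sketch (layout coordinates are hypothetical, and the two
# shm groups named below are placeholders for whatever groups carry the
# locator output and settings variables on the vehicle):
#
#   runner = LocatorRunner("orange", orange_pos=(10.0, 4.0), led_pos=(12.0, 5.0),
#                          display=False,
#                          locator_var=shm.orange_locator_results,   # hypothetical group
#                          settings_var=shm.locator_settings)        # hypothetical group
#   while True:
#       runner.update()
#       time.sleep(0.1)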
|
|
# Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyn_exceptions import PynException
from stats import Stats
from pool import Pool, Empty
from printer import Printer
class Builder(object):
def __init__(self, host, args, expand_vars, started_time):
self.host = host
self.args = args
self._should_overwrite = args.overwrite_status and not args.verbose
self.expand_vars = expand_vars
self.stats = Stats(host.getenv('NINJA_STATUS', '[%s/%t] '),
host.time, started_time)
self._printer = Printer(host.print_out, self._should_overwrite)
self._mtimes = {}
self._failures = 0
self._pool = None
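# A node is scheduled for rebuilding when any of its outputs is missing,
# any dependency is newer than the output, or its expanded command line
# differs from the one recorded in the previous build graph.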
def find_nodes_to_build(self, old_graph, graph):
node_names = self.args.targets or graph.defaults or graph.roots()
try:
nodes_to_build = graph.closure(node_names)
except KeyError as e:
raise PynException('error: unknown target %s' % str(e))
sorted_nodes = graph.tsort(nodes_to_build)
sorted_nodes = [n for n in sorted_nodes
if graph.nodes[n].rule_name != 'phony']
nodes_to_build = []
for node_name in sorted_nodes:
n = graph.nodes[node_name]
my_stat = self._stat(node_name)
if my_stat is None or any(self._stat(d) > my_stat
for d in n.deps()):
nodes_to_build.append(node_name)
continue
if old_graph and node_name in old_graph.nodes:
if (self._command(old_graph, node_name) !=
self._command(graph, node_name)):
nodes_to_build.append(node_name)
continue
return nodes_to_build
def build(self, graph, nodes_to_build):
stats = self.stats
stats.total = len(nodes_to_build)
stats.started = 0
stats.started_time = self.host.time()
running_jobs = []
self._pool = Pool(self.args.jobs, _call)
try:
while nodes_to_build and self._failures < self.args.errors:
while stats.started - stats.finished < self.args.jobs:
n = self._find_next_available_node(graph, nodes_to_build)
if n:
self._build_node(graph, n)
running_jobs.append(n)
else:
break
did_work = self._process_completed_jobs(graph, running_jobs)
if (not did_work and nodes_to_build and
self._failures < self.args.errors):
did_work = self._process_completed_jobs(graph,
running_jobs,
block=True)
while running_jobs:
did_work = self._process_completed_jobs(graph, running_jobs,
block=True)
finally:
self._pool.close()
self._pool.join()
self._printer.flush()
return 1 if self._failures else 0
def _find_next_available_node(self, graph, nodes_to_build):
next_node = None
for node_name in nodes_to_build:
n = graph.nodes[node_name]
if not any(d in graph.nodes and graph.nodes[d].running
for d in n.deps(include_order_only=True)):
next_node = node_name
break
if next_node:
# Ensure all of the dependencies actually exist.
# FIXME: is there a better place for this check?
for d in n.deps():
if not self.host.exists(d):
raise PynException("error: '%s', needed by '%s', %s" %
(d, next_node,
"missing and no known rule to make "
"it"))
nodes_to_build.remove(next_node)
return next_node
def _command(self, graph, node_name):
node = graph.nodes[node_name]
rule_scope = graph.rules[node.rule_name]
return self.expand_vars(rule_scope['command'], node.scope, rule_scope)
def _description(self, graph, node_name):
node = graph.nodes[node_name]
rule_scope = graph.rules[node.rule_name]
desc = rule_scope['description'] or rule_scope['command']
return self.expand_vars(desc, node.scope, rule_scope)
def _build_node(self, graph, node_name):
node = graph.nodes[node_name]
desc = self._description(graph, node_name)
command = self._command(graph, node_name)
self._build_node_started(node, desc, command)
dry_run = node.rule_name == 'phony' or self.args.dry_run
if not dry_run:
for o in node.outputs:
self.host.maybe_mkdir(self.host.dirname(o))
self._pool.send((node.name, desc, command, dry_run, self.host))
def _process_completed_jobs(self, graph, running_jobs, block=False):
did_work = False
while True:
try:
resp = self._pool.get(block=block)
running_jobs.remove(resp[0])
did_work = True
self._build_node_done(graph, resp)
if block:
break
except Empty:
break
return did_work
def _build_node_started(self, node, desc, command):
node.running = True
self.stats.started += 1
if self.args.verbose > 1:
self._update(command, elide=False)
else:
self._update(desc)
def _build_node_done(self, graph, result):
node_name, desc, command, ret, out, err = result
n = graph.nodes[node_name]
rule_scope = graph.rules[n.rule_name]
n.running = False
if n.scope['depfile'] and n.scope['deps'] == 'gcc':
path = self.expand_vars(n.scope['depfile'], n.scope, rule_scope)
if self.host.exists(path):
depsfile_deps = self.host.read(path).split()[2:]
self.host.remove(path)
if n.depsfile_deps != depsfile_deps:
n.depsfile_deps = depsfile_deps
graph.dirty = True
self.stats.finished += 1
if ret:
self._failures += 1
self._update(command, prefix='FAILED: ', elide=False)
elif self.args.verbose > 1:
self._update(command, elide=False)
elif self._should_overwrite:
self._update(desc)
if out or err:
self._printer.flush()
if out:
self.host.print_out(out, end='')
if err:
self.host.print_err(err, end='')
def _update(self, msg, prefix=None, elide=True):
prefix = prefix or self.stats.format()
self._printer.update(prefix + msg, elide=elide)
def _stat(self, name):
if name not in self._mtimes:
self._restat(name)
return self._mtimes.get(name, None)
def _restat(self, name):
if self.host.exists(name):
self._mtimes[name] = self.host.mtime(name)
else:
self._mtimes[name] = None
def _call(request):
node_name, desc, command, dry_run, host = request
if dry_run:
ret, out, err = 0, '', ''
else:
ret, out, err = host.call(command)
return (node_name, desc, command, ret, out, err)
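# _call is the worker function handed to Pool in Builder.build():
# _build_node() sends (name, desc, command, dry_run, host) tuples via
# self._pool.send(), and _process_completed_jobs() collects the
# (name, desc, command, ret, out, err) results via self._pool.get().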
|