import numpy as np
import csv, itertools
# This was copied from my water_correction project to the AccAssess plugin
# project on 23 January 2013. Knowing that may be useful later on. ...but
# probably not.
#### TEST DATA ####------------------------------------------------------------------------
def congalton_table1():
"""
Error matrix from Congalton 1991 Table 1.
Used here as an example for testing. See references.txt for full citations.
"""
mat = np.array([ [65,4,22,24],[6,81,5,8],[0,11,85,19],[4,7,3,90] ], dtype=int).view( ErrorMatrix )
return mat
def congalton_table2():
"""
Error matrix from Congalton 1991 Table 2.
Used here as an example for testing. See references.txt for full citations.
"""
sup = np.array([ [68,7,3,0],[12,112,15,10],[3,9,89,0],[0,2,5,56] ], dtype=int).view( ErrorMatrix )
return sup
def wundram_table2():
"""
Supervised classification results from Wundram and Loffler, 2008.
Used as an example in Pontius and Millones 2011. See references.txt for full
citations. Used here for testing purposes.
"""
tab2 = np.array([ [89, 3, 7, 0, 1],
[11,16,10, 1, 0],
[ 2,14,60, 2, 0],
[ 1, 9, 7, 5, 0],
[20, 1, 2, 0, 4] ], dtype=int ).view( ErrorMatrix )
return tab2
def wundram_table3():
"""
Unsupervised classification results from Wundram and Loffler, 2008.
Used as an example in Pontius and Millones 2011. See references.txt for full
citations. Used here for testing purposes.
"""
tab3 = np.array([ [114, 9, 8, 0, 1],
[ 5,23,11, 1, 0],
[ 1,10,59, 2, 0],
[ 0, 1, 7, 5, 0],
[ 3, 0, 1, 0, 4] ], dtype=int ).view( ErrorMatrix )
return tab3
def ref_array():
"""
A randomly generated array for testing.
This was generated with: ref = np.random.randint(0,5,(10,9)). We need
consistent results for testing so we don't regenerate it every time.
"""
ref = np.array([ [4, 4, 3, 4, 4, 3, 2, 4, 0],
[4, 2, 0, 3, 0, 3, 0, 3, 3],
[1, 0, 3, 1, 2, 4, 0, 1, 2],
[0, 4, 4, 1, 3, 3, 1, 2, 0],
[3, 0, 1, 0, 0, 1, 3, 2, 2],
[4, 1, 0, 3, 4, 4, 3, 4, 3],
[4, 3, 4, 1, 4, 0, 0, 2, 4],
[0, 4, 2, 1, 1, 4, 4, 4, 4],
[0, 2, 1, 1, 1, 4, 0, 0, 0],
[4, 2, 3, 0, 4, 4, 4, 1, 0]], dtype=int)
return ref
def comp_array():
"""
A randomly altered version of ref_array array for testing.
This was generated with:
ref = ref_array()
comp = ref.copy()
mask = np.random.rand(*comp.shape) < 0.20
comp[mask] = np.random.randint(0, 5, mask.sum())
That just replaces roughly 20% of the numbers from ref with randomly
generated integers.
"""
comp = np.array([ [4, 4, 3, 4, 1, 1, 2, 4, 0],
[4, 1, 0, 3, 0, 3, 0, 3, 3],
[1, 0, 3, 1, 2, 1, 1, 1, 1],
[0, 4, 4, 1, 1, 3, 1, 2, 0],
[1, 0, 1, 1, 0, 1, 3, 2, 2],
[4, 1, 0, 3, 4, 4, 3, 4, 3],
[1, 3, 4, 1, 1, 1, 0, 2, 4],
[0, 4, 2, 1, 1, 4, 4, 1, 4],
[0, 2, 1, 1, 1, 4, 0, 0, 0],
[4, 2, 3, 0, 1, 4, 1, 1, 0]], dtype=int)
return comp
#---------------------------------------------------------------#### END OF TEST DATA ####
def validate_comparison( reference, comparison ):
"""
Take two arrays and make sure they can be compared by error_matrix.
In order for our error matrix to make sense, the comparison array should
not contain values that do not exist in the reference array. Since
error_matrix derives the class list from both arrays itself, this check
may turn out to be unnecessary.
"""
# placeholder -- no validation is actually performed yet
return True
def error_matrix( reference, comparison, categories=None, unclassified=0 ):
"""
Take a reference array and a comparison array and return an error matrix.
>>> error_matrix(ref_array(),comp_array())
ErrorMatrix([[15, 0, 0, 0],
[ 2, 9, 0, 0],
[ 3, 0, 13, 0],
[ 7, 0, 0, 20]])
"""
idx = np.where( reference != unclassified )
all_classes = np.unique( np.vstack( (reference[idx], comparison[idx]) ) )
n = len( all_classes )
# count occurrences of each (reference value, comparison value) pair
pairs = list( zip( reference.flatten(), comparison.flatten() ) )
em = np.array([ pairs.count(x) for x in itertools.product(all_classes, repeat=2) ]).reshape(n, n).view( ErrorMatrix )
if categories:
em.categories = categories
else:
# need vstacked values so we can check both arrays
em.categories = all_classes.tolist()
return em
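# Illustrative usage (a sketch; the category labels here are made up):
#   em = error_matrix( ref_array(), comp_array(), categories=['a','b','c','d'] )
#   em.overall_accuracy            # overall percent agreement
#   em.quantity_disagreement       # Pontius and Millones 2011, equation 3
#   em.with_accuracies_and_totals  # annotated matrix (see ErrorMatrix below)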
class ErrorMatrix( np.ndarray ):
"""
A subclass of numpy ndarray with methods to produce accuracy measures.
>>> errmat = ErrorMatrix( np.array([ [65,4,22,24],[6,81,5,8],[0,11,85,19],[4,7,3,90] ], dtype=int) )
>>> errmat.with_accuracies_and_totals
ErrorMatrix([[65, 4, 22, 24, 115, 57],
[6, 81, 5, 8, 100, 81],
[0, 11, 85, 19, 115, 74],
[4, 7, 3, 90, 104, 87],
[75, 103, 115, 141, 434, None],
[87, 79, 74, 64, None, 74]], dtype=object)
"""
def __new__(cls, input_array, categories=None, title=None):
"""
Constructor for new ErrorMatrix objects.
input_array -- An N x N (square) numpy array or a string representing
the filepath to a comma delimited csv file contaning an N x N matrix
with just numbers (no labels or anything). There is currently no shape
validation so non-square arrays will be accepted but most methods will
fail or give useless results.
categories -- A list of strings of length N containing labels for classification
categories. These will be used for labeling outputs.
title -- A string title for the error matrix that will be useful in identifying
what classification the error matrix and accuracy measures relate to.
"""
# Subclassing as described here: http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
# Deal with a file path being handed in
if input_array.__class__==str:
input_array = np.genfromtxt(input_array, delimiter=',')
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attributes to the created instance
if categories:
# if categories are given, use them
obj.categories = categories
else:
# if not, just label with numbers starting at 1
obj.categories = list(range(1, 1 + obj.shape[0]))
obj.title = title
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
# http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
if obj is None: return
self.categories = getattr(obj, 'categories', list(range(1, 1 + self.shape[0])))
self.title = getattr(obj, 'title', None)
def round(self, *places):
"""
If float type, return rounded ErrorMatrix. Otherwise just return self.
"""
return super(ErrorMatrix, self).round( *places ).view( ErrorMatrix )
@property
def proportions( self ):
"""
Return the error matrix as proportions.
Every element of the matrix is divided by the sum of the matrix.
>>> wundram_table2().proportions.round(4)
ErrorMatrix([[ 0.3358, 0.0113, 0.0264, 0. , 0.0038],
[ 0.0415, 0.0604, 0.0377, 0.0038, 0. ],
[ 0.0075, 0.0528, 0.2264, 0.0075, 0. ],
[ 0.0038, 0.034 , 0.0264, 0.0189, 0. ],
[ 0.0755, 0.0038, 0.0075, 0. , 0.0151]])
"""
return np.nan_to_num( self.astype(float) / self.sum() )
@property
def proportion_in_reference( self ):
"""
Returns the proportion of pixels that fall into each category for the reference data.
>>> wundram_table2().proportion_in_reference.round(4)
ErrorMatrix([ 0.4642, 0.1623, 0.3245, 0.0302, 0.0189])
"""
return np.nan_to_num( self.sum(axis=0).astype(float) / self.sum() )
@property
def proportion_in_comparison( self ):
"""
Returns the proportion of pixels that fall into each category for the comparison data.
>>> wundram_table2().proportion_in_comparison.round(4)
ErrorMatrix([ 0.3774, 0.1434, 0.2943, 0.083 , 0.1019])
"""
return np.nan_to_num( self.sum(axis=1).astype(float) / self.sum() )
def observed_proportions( self, pop_totals=None ):
"""
Transform the matrix from observed samples to observed proportions using
equation 1 from Pontius and Millones 2011. I'm still a little confused
about using population totals in stratified random sampling. I need to
look into that and do some more testing.
>>> wundram_table2().observed_proportions().round(4)
ErrorMatrix([[ 0.3358, 0.0113, 0.0264, 0. , 0.0038],
[ 0.0415, 0.0604, 0.0377, 0.0038, 0. ],
[ 0.0075, 0.0528, 0.2264, 0.0075, 0. ],
[ 0.0038, 0.034 , 0.0264, 0.0189, 0. ],
[ 0.0755, 0.0038, 0.0075, 0. , 0.0151]])
"""
if pop_totals is None:
pop_totals = self.sum(axis=1)
return np.nan_to_num( self.astype(float) / self.sum(axis=1) ) * ( pop_totals / pop_totals.sum().astype(float) )
@property
def ob_props( self ):
"""
Shorthand alias for observed_proportions(). Currently only used for testing.
"""
return self.observed_proportions()
@property
def commission( self ):
"""
Returns commission disagreement values for each category.
For a category i, commission is the total number of pixels that the
comparison map placed in category i but that, according to the reference
map, actually belong to other categories. So for category i, commission =
all category i pixels in the comparison map - the pixels that are
categorized the same in both the reference map and the comparison map.
The following results were also confirmed against column W in the
SampleMatrix tab of the PontiusMatrix24.xlsx spreadsheet from
http://www.clarku.edu/~rpontius after entering the correct values.
>>> wundram_table2().observed_proportions().commission.round(4)
ErrorMatrix([ 0.0415, 0.083 , 0.0679, 0.0642, 0.0868])
"""
return self.sum(axis=1) - self.diagonal()
@property
def commission_proportion( self ):
"""
Return error of commission as a proportion for each category.
The following values are consistent with those published in Table 2 of
Wundram and Loffler 2008
>>> wundram_table2().commission_proportion.round(4)
ErrorMatrix([ 0.11 , 0.5789, 0.2308, 0.7727, 0.8519])
"""
return np.nan_to_num( self.commission.astype(float) / self.sum(axis=1) )
@property
def omission( self ):
"""
Returns omission disagreement values for each category.
For a category i, omission is the total number of class i pixels from the
reference map that ended up in non-i categories in the comparison map. So
for category i, omission = all category i pixels in the reference map -
the pixels that were categorized as i in both reference and comparison.
The following results were also confirmed against column X in the
PontiusMatrix24.xlsx spreadsheet from http://www.clarku.edu/~rpontius
>>> wundram_table2().observed_proportions().omission.round(4)
ErrorMatrix([ 0.1283, 0.1019, 0.0981, 0.0113, 0.0038])
"""
return self.sum(axis=0) - self.diagonal()
@property
def omission_proportion( self ):
"""
Return error of omission as a proportion for each category.
The following values are consistent with those published in Table 2 of
Wundram and Loffler 2008 except that where I calculated a value of 0.375,
they rounded to 0.37. I'm still pretty sure I'm doing it right.
>>> wundram_table2().omission_proportion.round(4)
ErrorMatrix([ 0.2764, 0.6279, 0.3023, 0.375 , 0.2 ])
"""
return np.nan_to_num( self.omission.astype(float) / self.sum(axis=0) )
def quantity_disagreement_for_category( self, category ):
"""
From equation 2 in Pontius and Millones 2011. category is an integer
index, zero based.
"""
return abs( self[ category ].sum() - self.T[ category ].sum() )
@property
def quantity_disagreements( self ):
"""
Returns an array of quantity disagreement values. One for each category.
"""
return abs( self.sum(axis=0) - self.sum(axis=1) ).view( np.ndarray )
@property
def quantity_disagreement( self ):
"""
Returns a single quantity disagreement value for the error matrix as
described by equation 3 in Pontius and Millones 2011.
>>> print wundram_table2().observed_proportions().quantity_disagreement.round(4)
0.1358
"""
return self.quantity_disagreements.sum() / 2
def allocation_disagreement_for_category( self, category ):
"""
Returns the allocation disagreement value for a category as described
in equation 4 of Pontius and Millones 2011.
"""
return 2 * np.array([ self.commission[ category ], self.omission[ category ] ]).min()
@property
def allocation_disagreements( self ):
"""
Returns an array of allocation disagreement values (one for each category)
as described by equation 4 in Pontius and Millones 2011.
"""
return np.array( [ self.allocation_disagreement_for_category(i) for i in range( self.shape[0] ) ] )
@property
def allocation_disagreement( self ):
"""
Returns a single allocation disagreement value for the whole matrix as
described in equation 5 of Pontius and Millones 2011.
The print statement here just keeps it from displaying as 0.20749999999999
>>> print wundram_table2().observed_proportions().allocation_disagreement.round(4)
0.2075
"""
return self.allocation_disagreements.sum() / 2
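# Note: for a matrix of observed proportions, quantity disagreement plus
# allocation disagreement equals the total disagreement (1 minus the sum of
# the diagonal). For wundram_table2().observed_proportions() that is roughly
# 0.1358 + 0.2075 = 0.3433.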
@property
def overall_accuracy( self ):
"""
Calculate overall accuracy from an error matrix.
This rounds to the same value as shown in Congalton 1991 Table 1
>>> print congalton_table1().overall_accuracy.round(6)
73.963134
"""
return 100.0 * self.astype(float).diagonal().sum() / self.sum().item()
def users_accuracy( self, category ):
return self.users_accuracies[ category ]
@property
def users_accuracies( self ):
"""
Return the user's accuracy measures for each category.
Congalton 1991 says, 'if the total number of correct pixels in a category
is divided by the total number of pixels that were classified in that
category, then this result is a measure of commission error. This measure,
called "user's accuracy" or reliability, is indicative of the probability
that a pixel classified on the map/image actually represents that category
on the ground'
The following values match those given in Congalton 1991
Table 1 (except these values are not rounded to whole numbers)
>>> congalton_table1().users_accuracies.round(4)
ErrorMatrix([ 56.5217, 81. , 73.913 , 86.5385])
"""
u_acc = 100 * self.diagonal().astype(float) / self.sum(axis=1)
# replace nans with zeros
if np.isnan( u_acc.sum() ):
u_acc = np.nan_to_num( u_acc )
return u_acc
def producers_accuracy( self, category ):
return self.producers_accuracies[ category ]
@property
def producers_accuracies( self ):
"""
Return the producer's accuracy measures for each category.
Congalton 1991 says, 'This accuracy measure indicates
the probability of a reference pixel being correctly
classified and is really a measure of omission error.'
The following values match those given in Congalton 1991
Table 1 (except these values are not rounded to whole numbers)
>>> congalton_table1().producers_accuracies.round(4)
ErrorMatrix([ 86.6667, 78.6408, 73.913 , 63.8298])
"""
p_acc = 100 * self.diagonal().astype(float) / self.sum(axis=0)
# if there are nan values we want to convert to zero
if np.isnan(p_acc.sum()):
p_acc = np.nan_to_num( p_acc )
return p_acc
##/////////////////////// ANNOTATED ARRAYS \\\\\\\\\\\\\\\\\\\\\\\\##
def extend(self, row, col, category, corner=None):
newem = np.vstack( (self,row) )
if type(col) in [ ErrorMatrix, np.ndarray ]:
col = col.tolist()
col.append( corner )
col = np.array([ col ]).T
newem = np.hstack( (newem,col) ).view( ErrorMatrix )
newem.categories = self.categories + [ category ]
newem.title = self.title
return newem
@property
def with_totals( self ):
"""
Returns an array with totals column and row added.
>>> wundram_table2().with_totals
ErrorMatrix([[ 89, 3, 7, 0, 1, 100],
[ 11, 16, 10, 1, 0, 38],
[ 2, 14, 60, 2, 0, 78],
[ 1, 9, 7, 5, 0, 22],
[ 20, 1, 2, 0, 4, 27],
[123, 43, 86, 8, 5, 265]])
"""
row_tots = self.sum(axis=0)
col_tots = self.sum(axis=1)
return self.extend( row_tots, col_tots, 'Totals', self.sum() )
@property
def with_accuracies( self ):
"""
Return ErrorMatrix with accuracies added.
"""
row = self.producers_accuracies.round().astype(int)
col = self.users_accuracies.round().astype(int)
corner = self.overall_accuracy.round().astype(int)
return self.extend( row, col, 'Accuracy', corner )
@property
def with_accuracies_and_totals( self ):
"""
Returns an array with columns and rows added for totals and user's
accuracy, producer's accuracy, and overall accuracy.
These results match those in Congalton 1991. See references.txt for
the full citation.
>>> congalton_table1().with_accuracies_and_totals
ErrorMatrix([[65, 4, 22, 24, 115, 57],
[6, 81, 5, 8, 100, 81],
[0, 11, 85, 19, 115, 74],
[4, 7, 3, 90, 104, 87],
[75, 103, 115, 141, 434, None],
[87, 79, 74, 64, None, 74]], dtype=object)
>>> wundram_table2().with_accuracies_and_totals
ErrorMatrix([[89, 3, 7, 0, 1, 100, 89],
[11, 16, 10, 1, 0, 38, 42],
[2, 14, 60, 2, 0, 78, 77],
[1, 9, 7, 5, 0, 22, 23],
[20, 1, 2, 0, 4, 27, 15],
[123, 43, 86, 8, 5, 265, None],
[72, 37, 70, 62, 80, None, 66]], dtype=object)
"""
row = self.producers_accuracies.round().astype(int).tolist()
col = self.users_accuracies.round().astype(int).tolist()
row.append( None )
col.append( None )
corner = self.overall_accuracy.round().astype(int)
return self.with_totals.extend( row, col, 'Accuracy', corner )
@property
def with_column_labels( self ):
"""
Add column labels from self.categories.
>>> error_matrix(ref_array(),comp_array()).with_column_labels
ErrorMatrix([[ 1, 2, 3, 4],
[15, 0, 0, 0],
[ 2, 9, 0, 0],
[ 3, 0, 13, 0],
[ 7, 0, 0, 20]])
>>> error_matrix(ref_array(),comp_array(),['this','that','other','thing']).with_accuracies_and_totals.with_column_labels
ErrorMatrix([[this, that, other, thing, Totals, Accuracy],
[15, 0, 0, 0, 15, 100],
[2, 9, 0, 0, 11, 82],
[3, 0, 13, 0, 16, 81],
[7, 0, 0, 20, 27, 74],
[27, 9, 13, 20, 69, None],
[56, 100, 100, 100, None, 83]], dtype=object)
"""
labels = self.categories
cols = self.shape[1]
if cols==len( labels ):
newem = np.vstack( (labels, self) )
elif cols==1 + len( labels ):
newem = np.vstack( ( [None] + labels, self ) )
else:
# the number of labels does not match the number of columns
raise Exception("Too many or too few categories for labeling while trying to label columns.")
newem = newem.view(ErrorMatrix)
newem.categories = self.categories
newem.title = self.title
return newem
@property
def with_row_labels( self ):
return self.T.with_column_labels.T
@property
def with_labels( self ):
"""
Add labels from self.categories to the matrix so we can export it as csv.
>>> em = error_matrix(ref_array(),comp_array())
>>> em.with_labels
ErrorMatrix([[None, 1, 2, 3, 4],
[1, 15, 0, 0, 0],
[2, 2, 9, 0, 0],
[3, 3, 0, 13, 0],
[4, 7, 0, 0, 20]], dtype=object)
>>> error_matrix(2 * ref_array(),2 * comp_array()).with_totals.with_labels
ErrorMatrix([[None, 2, 4, 6, 8, Totals],
[2, 15, 0, 0, 0, 15],
[4, 2, 9, 0, 0, 11],
[6, 3, 0, 13, 0, 16],
[8, 7, 0, 0, 20, 27],
[Totals, 27, 9, 13, 20, 69]], dtype=object)
>>> error_matrix(ref_array(),comp_array(),['this','that','other','thing']).with_accuracies_and_totals.with_labels
ErrorMatrix([[None, this, that, other, thing, Totals, Accuracy],
[this, 15, 0, 0, 0, 15, 100],
[that, 2, 9, 0, 0, 11, 82],
[other, 3, 0, 13, 0, 16, 81],
[thing, 7, 0, 0, 20, 27, 74],
[Totals, 27, 9, 13, 20, 69, None],
[Accuracy, 56, 100, 100, 100, None, 83]], dtype=object)
"""
return self.with_column_labels.with_row_labels
###\\\\\\\\\\\\\\\\\\\\\\\\\\ END ANNOTATED ARRAYS //////////////////////
###--------------- OUTPUT -----------------------------------------
def save_csv( self, filepath, annotations=['with_accuracies_and_totals','with_labels'], rounding=None ):
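"""
Write this error matrix to a csv file at filepath.
annotations -- a list of ErrorMatrix property names that are applied in
order to decorate the matrix (e.g. totals, accuracies, and labels) before
writing.
rounding -- optional number of decimal places to round to before writing.
Quantity and allocation disagreement values are written below the matrix.
"""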
with open(filepath, 'wb') as f:
writer = csv.writer(f)
# copy the array so we can annotate it without screwing it up
arr = self.copy()
# round if needed
if rounding:
arr = arr.round( rounding )
# annotate as specified
for a in annotations: arr = arr.__getattribute__( a )
# write the title if there is one
if self.title:
writer.writerow( [ str( self.title ) ] )
# write the annotated error matrix
writer.writerows( arr )
# write an empty row as a spacer
writer.writerow( [''] )
# write quantity and allocation disagreements
writer.writerow( ['Quantity Disagreement',self.quantity_disagreement] )
writer.writerow( ['Allocation Disagreement',self.allocation_disagreement] )
if __name__ == "__main__":
import doctest
doctest.testmod()
from django.urls import path
from extras.views import ObjectChangeLogView, ImageAttachmentEditView
from ipam.views import ServiceCreateView
from secrets.views import secret_add
from . import views
from .models import (
Cable, ConsolePort, ConsoleServerPort, Device, DeviceRole, DeviceType, FrontPort, Interface, Manufacturer, Platform,
PowerFeed, PowerPanel, PowerPort, PowerOutlet, Rack, RackGroup, RackReservation, RackRole, RearPort, Region, Site,
VirtualChassis,
)
app_name = 'dcim'
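# With app_name set, these routes are reversed through the 'dcim' namespace,
# e.g. reverse('dcim:device', kwargs={'pk': device.pk}) (illustrative).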
urlpatterns = [
# Regions
path(r'regions/', views.RegionListView.as_view(), name='region_list'),
path(r'regions/add/', views.RegionCreateView.as_view(), name='region_add'),
path(r'regions/import/', views.RegionBulkImportView.as_view(), name='region_import'),
path(r'regions/delete/', views.RegionBulkDeleteView.as_view(), name='region_bulk_delete'),
path(r'regions/<int:pk>/edit/', views.RegionEditView.as_view(), name='region_edit'),
path(r'regions/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='region_changelog', kwargs={'model': Region}),
# Sites
path(r'sites/', views.SiteListView.as_view(), name='site_list'),
path(r'sites/add/', views.SiteCreateView.as_view(), name='site_add'),
path(r'sites/import/', views.SiteBulkImportView.as_view(), name='site_import'),
path(r'sites/edit/', views.SiteBulkEditView.as_view(), name='site_bulk_edit'),
path(r'sites/delete/', views.SiteBulkDeleteView.as_view(), name='site_bulk_delete'),
path(r'sites/<slug:slug>/', views.SiteView.as_view(), name='site'),
path(r'sites/<slug:slug>/edit/', views.SiteEditView.as_view(), name='site_edit'),
path(r'sites/<slug:slug>/delete/', views.SiteDeleteView.as_view(), name='site_delete'),
path(r'sites/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='site_changelog', kwargs={'model': Site}),
path(r'sites/<int:object_id>/images/add/', ImageAttachmentEditView.as_view(), name='site_add_image', kwargs={'model': Site}),
# Rack groups
path(r'rack-groups/', views.RackGroupListView.as_view(), name='rackgroup_list'),
path(r'rack-groups/add/', views.RackGroupCreateView.as_view(), name='rackgroup_add'),
path(r'rack-groups/import/', views.RackGroupBulkImportView.as_view(), name='rackgroup_import'),
path(r'rack-groups/delete/', views.RackGroupBulkDeleteView.as_view(), name='rackgroup_bulk_delete'),
path(r'rack-groups/<int:pk>/edit/', views.RackGroupEditView.as_view(), name='rackgroup_edit'),
path(r'rack-groups/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='rackgroup_changelog', kwargs={'model': RackGroup}),
# Rack roles
path(r'rack-roles/', views.RackRoleListView.as_view(), name='rackrole_list'),
path(r'rack-roles/add/', views.RackRoleCreateView.as_view(), name='rackrole_add'),
path(r'rack-roles/import/', views.RackRoleBulkImportView.as_view(), name='rackrole_import'),
path(r'rack-roles/delete/', views.RackRoleBulkDeleteView.as_view(), name='rackrole_bulk_delete'),
path(r'rack-roles/<int:pk>/edit/', views.RackRoleEditView.as_view(), name='rackrole_edit'),
path(r'rack-roles/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='rackrole_changelog', kwargs={'model': RackRole}),
# Rack reservations
path(r'rack-reservations/', views.RackReservationListView.as_view(), name='rackreservation_list'),
path(r'rack-reservations/edit/', views.RackReservationBulkEditView.as_view(), name='rackreservation_bulk_edit'),
path(r'rack-reservations/delete/', views.RackReservationBulkDeleteView.as_view(), name='rackreservation_bulk_delete'),
path(r'rack-reservations/<int:pk>/edit/', views.RackReservationEditView.as_view(), name='rackreservation_edit'),
path(r'rack-reservations/<int:pk>/delete/', views.RackReservationDeleteView.as_view(), name='rackreservation_delete'),
path(r'rack-reservations/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='rackreservation_changelog', kwargs={'model': RackReservation}),
# Racks
path(r'racks/', views.RackListView.as_view(), name='rack_list'),
path(r'rack-elevations/', views.RackElevationListView.as_view(), name='rack_elevation_list'),
path(r'racks/add/', views.RackEditView.as_view(), name='rack_add'),
path(r'racks/import/', views.RackBulkImportView.as_view(), name='rack_import'),
path(r'racks/edit/', views.RackBulkEditView.as_view(), name='rack_bulk_edit'),
path(r'racks/delete/', views.RackBulkDeleteView.as_view(), name='rack_bulk_delete'),
path(r'racks/<int:pk>/', views.RackView.as_view(), name='rack'),
path(r'racks/<int:pk>/edit/', views.RackEditView.as_view(), name='rack_edit'),
path(r'racks/<int:pk>/delete/', views.RackDeleteView.as_view(), name='rack_delete'),
path(r'racks/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='rack_changelog', kwargs={'model': Rack}),
path(r'racks/<int:rack>/reservations/add/', views.RackReservationCreateView.as_view(), name='rack_add_reservation'),
path(r'racks/<int:object_id>/images/add/', ImageAttachmentEditView.as_view(), name='rack_add_image', kwargs={'model': Rack}),
# Manufacturers
path(r'manufacturers/', views.ManufacturerListView.as_view(), name='manufacturer_list'),
path(r'manufacturers/add/', views.ManufacturerCreateView.as_view(), name='manufacturer_add'),
path(r'manufacturers/import/', views.ManufacturerBulkImportView.as_view(), name='manufacturer_import'),
path(r'manufacturers/delete/', views.ManufacturerBulkDeleteView.as_view(), name='manufacturer_bulk_delete'),
path(r'manufacturers/<slug:slug>/edit/', views.ManufacturerEditView.as_view(), name='manufacturer_edit'),
path(r'manufacturers/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='manufacturer_changelog', kwargs={'model': Manufacturer}),
# Device types
path(r'device-types/', views.DeviceTypeListView.as_view(), name='devicetype_list'),
path(r'device-types/add/', views.DeviceTypeCreateView.as_view(), name='devicetype_add'),
path(r'device-types/import/', views.DeviceTypeBulkImportView.as_view(), name='devicetype_import'),
path(r'device-types/edit/', views.DeviceTypeBulkEditView.as_view(), name='devicetype_bulk_edit'),
path(r'device-types/delete/', views.DeviceTypeBulkDeleteView.as_view(), name='devicetype_bulk_delete'),
path(r'device-types/<int:pk>/', views.DeviceTypeView.as_view(), name='devicetype'),
path(r'device-types/<int:pk>/edit/', views.DeviceTypeEditView.as_view(), name='devicetype_edit'),
path(r'device-types/<int:pk>/delete/', views.DeviceTypeDeleteView.as_view(), name='devicetype_delete'),
path(r'device-types/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='devicetype_changelog', kwargs={'model': DeviceType}),
# Console port templates
path(r'device-types/<int:pk>/console-ports/add/', views.ConsolePortTemplateCreateView.as_view(), name='devicetype_add_consoleport'),
path(r'device-types/<int:pk>/console-ports/delete/', views.ConsolePortTemplateBulkDeleteView.as_view(), name='devicetype_delete_consoleport'),
# Console server port templates
path(r'device-types/<int:pk>/console-server-ports/add/', views.ConsoleServerPortTemplateCreateView.as_view(), name='devicetype_add_consoleserverport'),
path(r'device-types/<int:pk>/console-server-ports/delete/', views.ConsoleServerPortTemplateBulkDeleteView.as_view(), name='devicetype_delete_consoleserverport'),
# Power port templates
path(r'device-types/<int:pk>/power-ports/add/', views.PowerPortTemplateCreateView.as_view(), name='devicetype_add_powerport'),
path(r'device-types/<int:pk>/power-ports/delete/', views.PowerPortTemplateBulkDeleteView.as_view(), name='devicetype_delete_powerport'),
# Power outlet templates
path(r'device-types/<int:pk>/power-outlets/add/', views.PowerOutletTemplateCreateView.as_view(), name='devicetype_add_poweroutlet'),
path(r'device-types/<int:pk>/power-outlets/delete/', views.PowerOutletTemplateBulkDeleteView.as_view(), name='devicetype_delete_poweroutlet'),
# Interface templates
path(r'device-types/<int:pk>/interfaces/add/', views.InterfaceTemplateCreateView.as_view(), name='devicetype_add_interface'),
path(r'device-types/<int:pk>/interfaces/edit/', views.InterfaceTemplateBulkEditView.as_view(), name='devicetype_bulkedit_interface'),
path(r'device-types/<int:pk>/interfaces/delete/', views.InterfaceTemplateBulkDeleteView.as_view(), name='devicetype_delete_interface'),
# Front port templates
path(r'device-types/<int:pk>/front-ports/add/', views.FrontPortTemplateCreateView.as_view(), name='devicetype_add_frontport'),
path(r'device-types/<int:pk>/front-ports/delete/', views.FrontPortTemplateBulkDeleteView.as_view(), name='devicetype_delete_frontport'),
# Rear port templates
path(r'device-types/<int:pk>/rear-ports/add/', views.RearPortTemplateCreateView.as_view(), name='devicetype_add_rearport'),
path(r'device-types/<int:pk>/rear-ports/delete/', views.RearPortTemplateBulkDeleteView.as_view(), name='devicetype_delete_rearport'),
# Device bay templates
path(r'device-types/<int:pk>/device-bays/add/', views.DeviceBayTemplateCreateView.as_view(), name='devicetype_add_devicebay'),
path(r'device-types/<int:pk>/device-bays/delete/', views.DeviceBayTemplateBulkDeleteView.as_view(), name='devicetype_delete_devicebay'),
# Device roles
path(r'device-roles/', views.DeviceRoleListView.as_view(), name='devicerole_list'),
path(r'device-roles/add/', views.DeviceRoleCreateView.as_view(), name='devicerole_add'),
path(r'device-roles/import/', views.DeviceRoleBulkImportView.as_view(), name='devicerole_import'),
path(r'device-roles/delete/', views.DeviceRoleBulkDeleteView.as_view(), name='devicerole_bulk_delete'),
path(r'device-roles/<slug:slug>/edit/', views.DeviceRoleEditView.as_view(), name='devicerole_edit'),
path(r'device-roles/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='devicerole_changelog', kwargs={'model': DeviceRole}),
# Platforms
path(r'platforms/', views.PlatformListView.as_view(), name='platform_list'),
path(r'platforms/add/', views.PlatformCreateView.as_view(), name='platform_add'),
path(r'platforms/import/', views.PlatformBulkImportView.as_view(), name='platform_import'),
path(r'platforms/delete/', views.PlatformBulkDeleteView.as_view(), name='platform_bulk_delete'),
path(r'platforms/<slug:slug>/edit/', views.PlatformEditView.as_view(), name='platform_edit'),
path(r'platforms/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='platform_changelog', kwargs={'model': Platform}),
# Devices
path(r'devices/', views.DeviceListView.as_view(), name='device_list'),
path(r'devices/add/', views.DeviceCreateView.as_view(), name='device_add'),
path(r'devices/import/', views.DeviceBulkImportView.as_view(), name='device_import'),
path(r'devices/import/child-devices/', views.ChildDeviceBulkImportView.as_view(), name='device_import_child'),
path(r'devices/edit/', views.DeviceBulkEditView.as_view(), name='device_bulk_edit'),
path(r'devices/delete/', views.DeviceBulkDeleteView.as_view(), name='device_bulk_delete'),
path(r'devices/<int:pk>/', views.DeviceView.as_view(), name='device'),
path(r'devices/<int:pk>/edit/', views.DeviceEditView.as_view(), name='device_edit'),
path(r'devices/<int:pk>/delete/', views.DeviceDeleteView.as_view(), name='device_delete'),
path(r'devices/<int:pk>/config-context/', views.DeviceConfigContextView.as_view(), name='device_configcontext'),
path(r'devices/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='device_changelog', kwargs={'model': Device}),
path(r'devices/<int:pk>/inventory/', views.DeviceInventoryView.as_view(), name='device_inventory'),
path(r'devices/<int:pk>/status/', views.DeviceStatusView.as_view(), name='device_status'),
path(r'devices/<int:pk>/lldp-neighbors/', views.DeviceLLDPNeighborsView.as_view(), name='device_lldp_neighbors'),
path(r'devices/<int:pk>/config/', views.DeviceConfigView.as_view(), name='device_config'),
path(r'devices/<int:pk>/add-secret/', secret_add, name='device_addsecret'),
path(r'devices/<int:device>/services/assign/', ServiceCreateView.as_view(), name='device_service_assign'),
path(r'devices/<int:object_id>/images/add/', ImageAttachmentEditView.as_view(), name='device_add_image', kwargs={'model': Device}),
# Console ports
path(r'devices/console-ports/add/', views.DeviceBulkAddConsolePortView.as_view(), name='device_bulk_add_consoleport'),
path(r'devices/<int:pk>/console-ports/add/', views.ConsolePortCreateView.as_view(), name='consoleport_add'),
path(r'devices/<int:pk>/console-ports/delete/', views.ConsolePortBulkDeleteView.as_view(), name='consoleport_bulk_delete'),
path(r'console-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='consoleport_connect', kwargs={'termination_a_type': ConsolePort}),
path(r'console-ports/<int:pk>/edit/', views.ConsolePortEditView.as_view(), name='consoleport_edit'),
path(r'console-ports/<int:pk>/delete/', views.ConsolePortDeleteView.as_view(), name='consoleport_delete'),
path(r'console-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='consoleport_trace', kwargs={'model': ConsolePort}),
# Console server ports
path(r'devices/console-server-ports/add/', views.DeviceBulkAddConsoleServerPortView.as_view(), name='device_bulk_add_consoleserverport'),
path(r'devices/<int:pk>/console-server-ports/add/', views.ConsoleServerPortCreateView.as_view(), name='consoleserverport_add'),
path(r'devices/<int:pk>/console-server-ports/edit/', views.ConsoleServerPortBulkEditView.as_view(), name='consoleserverport_bulk_edit'),
path(r'devices/<int:pk>/console-server-ports/delete/', views.ConsoleServerPortBulkDeleteView.as_view(), name='consoleserverport_bulk_delete'),
path(r'console-server-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='consoleserverport_connect', kwargs={'termination_a_type': ConsoleServerPort}),
path(r'console-server-ports/<int:pk>/edit/', views.ConsoleServerPortEditView.as_view(), name='consoleserverport_edit'),
path(r'console-server-ports/<int:pk>/delete/', views.ConsoleServerPortDeleteView.as_view(), name='consoleserverport_delete'),
path(r'console-server-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='consoleserverport_trace', kwargs={'model': ConsoleServerPort}),
path(r'console-server-ports/rename/', views.ConsoleServerPortBulkRenameView.as_view(), name='consoleserverport_bulk_rename'),
path(r'console-server-ports/disconnect/', views.ConsoleServerPortBulkDisconnectView.as_view(), name='consoleserverport_bulk_disconnect'),
# Power ports
path(r'devices/power-ports/add/', views.DeviceBulkAddPowerPortView.as_view(), name='device_bulk_add_powerport'),
path(r'devices/<int:pk>/power-ports/add/', views.PowerPortCreateView.as_view(), name='powerport_add'),
path(r'devices/<int:pk>/power-ports/delete/', views.PowerPortBulkDeleteView.as_view(), name='powerport_bulk_delete'),
path(r'power-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='powerport_connect', kwargs={'termination_a_type': PowerPort}),
path(r'power-ports/<int:pk>/edit/', views.PowerPortEditView.as_view(), name='powerport_edit'),
path(r'power-ports/<int:pk>/delete/', views.PowerPortDeleteView.as_view(), name='powerport_delete'),
path(r'power-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='powerport_trace', kwargs={'model': PowerPort}),
# Power outlets
path(r'devices/power-outlets/add/', views.DeviceBulkAddPowerOutletView.as_view(), name='device_bulk_add_poweroutlet'),
path(r'devices/<int:pk>/power-outlets/add/', views.PowerOutletCreateView.as_view(), name='poweroutlet_add'),
path(r'devices/<int:pk>/power-outlets/edit/', views.PowerOutletBulkEditView.as_view(), name='poweroutlet_bulk_edit'),
path(r'devices/<int:pk>/power-outlets/delete/', views.PowerOutletBulkDeleteView.as_view(), name='poweroutlet_bulk_delete'),
path(r'power-outlets/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='poweroutlet_connect', kwargs={'termination_a_type': PowerOutlet}),
path(r'power-outlets/<int:pk>/edit/', views.PowerOutletEditView.as_view(), name='poweroutlet_edit'),
path(r'power-outlets/<int:pk>/delete/', views.PowerOutletDeleteView.as_view(), name='poweroutlet_delete'),
path(r'power-outlets/<int:pk>/trace/', views.CableTraceView.as_view(), name='poweroutlet_trace', kwargs={'model': PowerOutlet}),
path(r'power-outlets/rename/', views.PowerOutletBulkRenameView.as_view(), name='poweroutlet_bulk_rename'),
path(r'power-outlets/disconnect/', views.PowerOutletBulkDisconnectView.as_view(), name='poweroutlet_bulk_disconnect'),
# Interfaces
path(r'devices/interfaces/add/', views.DeviceBulkAddInterfaceView.as_view(), name='device_bulk_add_interface'),
path(r'devices/<int:pk>/interfaces/add/', views.InterfaceCreateView.as_view(), name='interface_add'),
path(r'devices/<int:pk>/interfaces/edit/', views.InterfaceBulkEditView.as_view(), name='interface_bulk_edit'),
path(r'devices/<int:pk>/interfaces/delete/', views.InterfaceBulkDeleteView.as_view(), name='interface_bulk_delete'),
path(r'interfaces/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='interface_connect', kwargs={'termination_a_type': Interface}),
path(r'interfaces/<int:pk>/', views.InterfaceView.as_view(), name='interface'),
path(r'interfaces/<int:pk>/edit/', views.InterfaceEditView.as_view(), name='interface_edit'),
path(r'interfaces/<int:pk>/assign-vlans/', views.InterfaceAssignVLANsView.as_view(), name='interface_assign_vlans'),
path(r'interfaces/<int:pk>/delete/', views.InterfaceDeleteView.as_view(), name='interface_delete'),
path(r'interfaces/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='interface_changelog', kwargs={'model': Interface}),
path(r'interfaces/<int:pk>/trace/', views.CableTraceView.as_view(), name='interface_trace', kwargs={'model': Interface}),
path(r'interfaces/rename/', views.InterfaceBulkRenameView.as_view(), name='interface_bulk_rename'),
path(r'interfaces/disconnect/', views.InterfaceBulkDisconnectView.as_view(), name='interface_bulk_disconnect'),
# Front ports
# path(r'devices/front-ports/add/', views.DeviceBulkAddFrontPortView.as_view(), name='device_bulk_add_frontport'),
path(r'devices/<int:pk>/front-ports/add/', views.FrontPortCreateView.as_view(), name='frontport_add'),
path(r'devices/<int:pk>/front-ports/edit/', views.FrontPortBulkEditView.as_view(), name='frontport_bulk_edit'),
path(r'devices/<int:pk>/front-ports/delete/', views.FrontPortBulkDeleteView.as_view(), name='frontport_bulk_delete'),
path(r'front-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='frontport_connect', kwargs={'termination_a_type': FrontPort}),
path(r'front-ports/<int:pk>/edit/', views.FrontPortEditView.as_view(), name='frontport_edit'),
path(r'front-ports/<int:pk>/delete/', views.FrontPortDeleteView.as_view(), name='frontport_delete'),
path(r'front-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='frontport_trace', kwargs={'model': FrontPort}),
path(r'front-ports/rename/', views.FrontPortBulkRenameView.as_view(), name='frontport_bulk_rename'),
path(r'front-ports/disconnect/', views.FrontPortBulkDisconnectView.as_view(), name='frontport_bulk_disconnect'),
# Rear ports
# path(r'devices/rear-ports/add/', views.DeviceBulkAddRearPortView.as_view(), name='device_bulk_add_rearport'),
path(r'devices/<int:pk>/rear-ports/add/', views.RearPortCreateView.as_view(), name='rearport_add'),
path(r'devices/<int:pk>/rear-ports/edit/', views.RearPortBulkEditView.as_view(), name='rearport_bulk_edit'),
path(r'devices/<int:pk>/rear-ports/delete/', views.RearPortBulkDeleteView.as_view(), name='rearport_bulk_delete'),
path(r'rear-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='rearport_connect', kwargs={'termination_a_type': RearPort}),
path(r'rear-ports/<int:pk>/edit/', views.RearPortEditView.as_view(), name='rearport_edit'),
path(r'rear-ports/<int:pk>/delete/', views.RearPortDeleteView.as_view(), name='rearport_delete'),
path(r'rear-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='rearport_trace', kwargs={'model': RearPort}),
path(r'rear-ports/rename/', views.RearPortBulkRenameView.as_view(), name='rearport_bulk_rename'),
path(r'rear-ports/disconnect/', views.RearPortBulkDisconnectView.as_view(), name='rearport_bulk_disconnect'),
# Device bays
path(r'devices/device-bays/add/', views.DeviceBulkAddDeviceBayView.as_view(), name='device_bulk_add_devicebay'),
path(r'devices/<int:pk>/bays/add/', views.DeviceBayCreateView.as_view(), name='devicebay_add'),
path(r'devices/<int:pk>/bays/delete/', views.DeviceBayBulkDeleteView.as_view(), name='devicebay_bulk_delete'),
path(r'device-bays/<int:pk>/edit/', views.DeviceBayEditView.as_view(), name='devicebay_edit'),
path(r'device-bays/<int:pk>/delete/', views.DeviceBayDeleteView.as_view(), name='devicebay_delete'),
path(r'device-bays/<int:pk>/populate/', views.DeviceBayPopulateView.as_view(), name='devicebay_populate'),
path(r'device-bays/<int:pk>/depopulate/', views.DeviceBayDepopulateView.as_view(), name='devicebay_depopulate'),
path(r'device-bays/rename/', views.DeviceBayBulkRenameView.as_view(), name='devicebay_bulk_rename'),
# Inventory items
path(r'inventory-items/', views.InventoryItemListView.as_view(), name='inventoryitem_list'),
path(r'inventory-items/import/', views.InventoryItemBulkImportView.as_view(), name='inventoryitem_import'),
path(r'inventory-items/edit/', views.InventoryItemBulkEditView.as_view(), name='inventoryitem_bulk_edit'),
path(r'inventory-items/delete/', views.InventoryItemBulkDeleteView.as_view(), name='inventoryitem_bulk_delete'),
path(r'inventory-items/<int:pk>/edit/', views.InventoryItemEditView.as_view(), name='inventoryitem_edit'),
path(r'inventory-items/<int:pk>/delete/', views.InventoryItemDeleteView.as_view(), name='inventoryitem_delete'),
path(r'devices/<int:device>/inventory-items/add/', views.InventoryItemEditView.as_view(), name='inventoryitem_add'),
# Cables
path(r'cables/', views.CableListView.as_view(), name='cable_list'),
path(r'cables/import/', views.CableBulkImportView.as_view(), name='cable_import'),
path(r'cables/edit/', views.CableBulkEditView.as_view(), name='cable_bulk_edit'),
path(r'cables/delete/', views.CableBulkDeleteView.as_view(), name='cable_bulk_delete'),
path(r'cables/<int:pk>/', views.CableView.as_view(), name='cable'),
path(r'cables/<int:pk>/edit/', views.CableEditView.as_view(), name='cable_edit'),
path(r'cables/<int:pk>/delete/', views.CableDeleteView.as_view(), name='cable_delete'),
path(r'cables/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='cable_changelog', kwargs={'model': Cable}),
# Console/power/interface connections (read-only)
path(r'console-connections/', views.ConsoleConnectionsListView.as_view(), name='console_connections_list'),
path(r'power-connections/', views.PowerConnectionsListView.as_view(), name='power_connections_list'),
path(r'interface-connections/', views.InterfaceConnectionsListView.as_view(), name='interface_connections_list'),
# Virtual chassis
path(r'virtual-chassis/', views.VirtualChassisListView.as_view(), name='virtualchassis_list'),
path(r'virtual-chassis/add/', views.VirtualChassisCreateView.as_view(), name='virtualchassis_add'),
path(r'virtual-chassis/<int:pk>/edit/', views.VirtualChassisEditView.as_view(), name='virtualchassis_edit'),
path(r'virtual-chassis/<int:pk>/delete/', views.VirtualChassisDeleteView.as_view(), name='virtualchassis_delete'),
path(r'virtual-chassis/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='virtualchassis_changelog', kwargs={'model': VirtualChassis}),
path(r'virtual-chassis/<int:pk>/add-member/', views.VirtualChassisAddMemberView.as_view(), name='virtualchassis_add_member'),
path(r'virtual-chassis-members/<int:pk>/delete/', views.VirtualChassisRemoveMemberView.as_view(), name='virtualchassis_remove_member'),
# Power panels
path(r'power-panels/', views.PowerPanelListView.as_view(), name='powerpanel_list'),
path(r'power-panels/add/', views.PowerPanelCreateView.as_view(), name='powerpanel_add'),
path(r'power-panels/import/', views.PowerPanelBulkImportView.as_view(), name='powerpanel_import'),
path(r'power-panels/delete/', views.PowerPanelBulkDeleteView.as_view(), name='powerpanel_bulk_delete'),
path(r'power-panels/<int:pk>/', views.PowerPanelView.as_view(), name='powerpanel'),
path(r'power-panels/<int:pk>/edit/', views.PowerPanelEditView.as_view(), name='powerpanel_edit'),
path(r'power-panels/<int:pk>/delete/', views.PowerPanelDeleteView.as_view(), name='powerpanel_delete'),
path(r'power-panels/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='powerpanel_changelog', kwargs={'model': PowerPanel}),
# Power feeds
path(r'power-feeds/', views.PowerFeedListView.as_view(), name='powerfeed_list'),
path(r'power-feeds/add/', views.PowerFeedEditView.as_view(), name='powerfeed_add'),
path(r'power-feeds/import/', views.PowerFeedBulkImportView.as_view(), name='powerfeed_import'),
path(r'power-feeds/edit/', views.PowerFeedBulkEditView.as_view(), name='powerfeed_bulk_edit'),
path(r'power-feeds/delete/', views.PowerFeedBulkDeleteView.as_view(), name='powerfeed_bulk_delete'),
path(r'power-feeds/<int:pk>/', views.PowerFeedView.as_view(), name='powerfeed'),
path(r'power-feeds/<int:pk>/edit/', views.PowerFeedEditView.as_view(), name='powerfeed_edit'),
path(r'power-feeds/<int:pk>/delete/', views.PowerFeedDeleteView.as_view(), name='powerfeed_delete'),
path(r'power-feeds/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='powerfeed_changelog', kwargs={'model': PowerFeed}),
]
import ast
from ast import *
from collections.abc import Iterable
from .astalyzer import FreeVarFinder
from ..base import LanguageNode, ComplexNode, BaseGenerator
from ...helpers import StringWithLocation
from ...runtime.debug import TemplateSyntaxError
import sys
try: # pragma: no cover
import sysconfig
HAS_ASSERT = bool(sysconfig.get_config_var('Py_DEBUG'))
del sysconfig
except ImportError: # pragma: no cover
HAS_ASSERT = False
name_counter = 0
ALWAYS_BUILTINS = '''
False
True
None
'''.split()
def simple_call(func, args=None):
return Call(func=func, args=args or [], keywords=[], starargs=None,
kwargs=None)
def create_argument_list(arguments):
return [arg(arg=id, annotation=None) for id in arguments]
def simple_function_def(name, arguments=()):
arguments = create_argument_list(arguments)
if sys.version_info >= (3, 8):
extra = {'posonlyargs': []}
else:
extra = {}
return FunctionDef(
name=name,
args=ast.arguments(
args=arguments,
vararg=None,
varargannotation=None,
kwonlyargs=[],
kwarg=None,
kwargannotation=None,
defaults=[],
kw_defaults=[],
**extra
),
body=[Pass()],
decorator_list=[],
returns=None
)
def NameX(id, store=False):
return Name(id=id, ctx=Load() if not store else Store())
def adjust_locations(ast_node, first_lineno, first_offset):
"""
Adjust the locations of the ast nodes, offsetting them
to the new lineno and column offset
"""
line_delta = first_lineno - 1
def _fix(node):
if 'lineno' in node._attributes:
lineno = node.lineno
col = node.col_offset
# adjust the offset on the first line
if lineno == 1:
col += first_offset
lineno += line_delta
node.lineno = lineno
node.col_offset = col
for child in iter_child_nodes(node):
_fix(child)
_fix(ast_node)
def get_fragment_ast(expression, mode='eval', adjust=(0, 0)):
if not isinstance(expression, str):
return expression
position = getattr(expression, 'position', (1, 0))
position = position[0] + adjust[0], position[1] + adjust[1]
tree = None
t = None
try:
exp = expression
if expression[-1:] != '\n':
exp = expression + '\n'
tree = ast.parse(exp, mode=mode)
except SyntaxError as e:
lineno = e.lineno
lineno += position[0] - 1
t = TemplateSyntaxError(e.msg, lineno=lineno)
if tree is None:
raise t
adjust_locations(tree, position[0], position[1])
return tree.body
def gen_name(typename=None):
global name_counter
name_counter += 1
if typename:
return "__TK__typed__%s__%d__" % (typename, name_counter)
else:
return "__TK_%d__" % (name_counter)
def static_eval(expr):
if isinstance(expr, UnaryOp) and isinstance(expr.op, Not):
return not static_eval(expr.operand)
return literal_eval(expr)
def static_expr_to_bool(expr):
try:
return bool(static_eval(expr))
except Exception:
return None
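# Example (illustrative): static_expr_to_bool(get_fragment_ast('not False'))
# returns True, while a non-constant expression such as 'x > 1' cannot be
# evaluated statically and yields None.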
class PythonNode(LanguageNode):
is_top_level = False
def generate_output_ast(self, code, generator, parent, escape=False,
position=None):
func = Name(id='__TK__output', ctx=Load())
if not isinstance(code, list):
code = [code]
rv = []
for i in code:
if position is not None:
i.lineno, i.col_offset = position
e = Expr(simple_call(func, [i]))
e.output_args = [i]
rv.append(e)
return rv
def make_buffer_frame(self, body):
new_body = []
new_body.append(Assign(
targets=[
NameX('__TK__output', store=True),
],
value=simple_call(
NameX('__TK__mkbuffer')
)
))
new_body.extend(body)
new_body.append(Return(value=NameX('__TK__output')))
return new_body
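# The buffer frame built above corresponds roughly to:
#   __TK__output = __TK__mkbuffer()
#   ...body...
#   return __TK__output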
def make_function(self, name, body, add_buffer=False, arguments=()):
# ensure that the function name is a str
func = simple_function_def(str(name), arguments=arguments)
new_body = func.body = []
if add_buffer:
new_body.extend(self.make_buffer_frame(body))
else:
new_body.extend(body)
if not new_body:
new_body.append(Pass())
return func
def generate_varscope(self, body):
name = gen_name('variable_scope')
rv = [
self.make_function(name, body,
arguments=['__TK__output', '__TK__escape']),
Expr(simple_call(NameX(name),
[NameX('__TK__output'), NameX('__TK__escape')]))
]
return rv
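# Roughly equivalent generated source, where NAME is a fresh
# __TK__typed__variable_scope__N__ identifier:
#   def NAME(__TK__output, __TK__escape): ...body...
#   NAME(__TK__output, __TK__escape)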
class PyOutputNode(PythonNode):
def __init__(self, text):
super(PyOutputNode, self).__init__()
self.text = text
def get_expressions(self):
return [self.get_expression()]
def get_expression(self):
return Str(s=self.text)
def generate_ast(self, generator, parent):
return self.generate_output_ast(self.get_expression(), generator,
parent)
class PyTranslatableOutputNode(PyOutputNode):
def __init__(self, text, needs_escape=False):
super(PyTranslatableOutputNode, self).__init__(text)
self.needs_escape = needs_escape
def get_expressions(self):
return [self.get_expression()]
def get_expression(self):
name = 'gettext'
if self.needs_escape:
name = 'egettext'
expr = simple_call(
NameX(name),
[Str(s=self.text)],
)
return expr
class PyExpressionNode(PythonNode):
def __init__(self, expression):
super(PyExpressionNode, self).__init__()
self.expr = expression
def get_expressions(self):
return [self.get_expression()]
def get_expression(self):
return simple_call(
NameX('__TK__escape'),
[self.get_unescaped_expression()]
)
def get_unescaped_expression(self):
return get_fragment_ast(self.expr)
def generate_ast(self, generator, parent):
return self.generate_output_ast(self.get_expression(), generator,
parent)
class PyCodeNode(PythonNode):
def __init__(self, source):
super(PyCodeNode, self).__init__()
self.source = source
def generate_ast(self, generator, parent):
return get_fragment_ast(self.source, mode='exec')
def coalesce_strings(args):
rv = []
str_on = None
for i in args:
if isinstance(i, Str):
if str_on:
str_on.s += i.s
continue
str_on = i
else:
str_on = None
rv.append(i)
return rv
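# Example (illustrative): [Str('a'), Str('b'), other, Str('c')] becomes
# [Str('ab'), other, Str('c')]; only runs of adjacent Str nodes are merged.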
class PyComplexNode(ComplexNode, PythonNode):
def generate_child_ast(self, generator, parent_for_children):
rv = []
for i in self.children:
rv.extend(i.generate_ast(generator, parent_for_children))
return rv
class PyIfNode(PyComplexNode):
def __init__(self, expression):
super(PyIfNode, self).__init__()
self.expression = expression
def generate_ast(self, generator, parent):
test = get_fragment_ast(self.expression)
boolean = static_expr_to_bool(test)
if boolean is False:
return []
if boolean is True:
return self.generate_child_ast(generator, parent)
node = If(
test=test,
body=self.generate_child_ast(generator, self),
orelse=[]
)
return [node]
def PyUnlessNode(expression):
expression = get_fragment_ast(expression)
expression = UnaryOp(op=Not(), operand=expression)
return PyIfNode(expression)
class PyImportNode(PythonNode):
def __init__(self, href, alias):
super(PyImportNode, self).__init__()
self.href = str(href)
self.alias = str(alias)
def generate_ast(self, generator, parent):
node = Assign(
targets=[NameX(str(self.alias), store=True)],
value=
simple_call(
func=
Attribute(value=NameX('__TK__runtime', store=False),
attr='import_defs', ctx=Load()),
args=[
NameX('__TK__original_context'),
Str(s=self.href)
]
)
)
if parent.is_top_level:
generator.add_top_level_import(str(self.alias), node)
return []
return [node]
class PyAttributeNode(PyComplexNode):
def __init__(self, name, value):
super(PyAttributeNode, self).__init__()
self.name = name
def get_expressions(self):
rv = []
for i in self.children:
rv.extend(i.get_expressions())
return rv
def generate_ast(self, generator, parent):
if len(self.children) == 1 and \
isinstance(self.children[0], PyExpressionNode):
# special case: the attribute contains a single expression; these are
# handled by __TK__output.output_boolean_attr, which is given the
# name and the unescaped expression
return [Expr(simple_call(
func=Attribute(
value=NameX('__TK__output'),
attr='output_boolean_attr',
ctx=Load()
),
args=[
Str(s=self.name),
self.children[0].get_unescaped_expression()
]
))]
# otherwise just return the output for the attribute code
# like before
return self.generate_output_ast(
[Str(s=' %s="' % self.name)] +
self.get_expressions() +
[Str(s='"')],
generator, parent
)
class PyAttrsNode(PythonNode):
def __init__(self, expression):
super(PyAttrsNode, self).__init__()
self.expression = expression
def generate_ast(self, generator, parent):
expression = get_fragment_ast(self.expression)
output = simple_call(
NameX('__TK__output_attrs'),
args=[expression]
)
return self.generate_output_ast(output, generator, parent)
class PyForNode(PyComplexNode):
def __init__(self, target_and_expression, parts):
super(PyForNode, self).__init__()
self.target_and_expression = target_and_expression
def generate_contents(self, generator, parent):
lineno, col = getattr(self.target_and_expression, 'position', (1, 0))
body = get_fragment_ast(
StringWithLocation('for %s: pass' % self.target_and_expression,
lineno, col - 4),
'exec',
)
for_node = body[0]
for_node.body = self.generate_child_ast(generator, self)
return [for_node]
def generate_ast(self, generator, parent):
# TODO: the varscope wrapping below might need to be reinstated
# return self.generate_varscope(self.generate_contents())
return self.generate_contents(generator, parent)
class PyDefineNode(PyComplexNode):
def __init__(self, funcspec):
super(PyDefineNode, self).__init__()
self.position = getattr(funcspec, 'position', (1, 0))
if '(' not in funcspec:
funcspec += '()'
self.funcspec = funcspec
def generate_ast(self, generator, parent):
body = get_fragment_ast(
StringWithLocation('def %s: pass' % self.funcspec,
self.position[0], self.position[1] - 4),
"exec"
)
def_node = body[0]
name = self.funcspec.partition('(')[0]
def_node.body = self.make_buffer_frame(
self.generate_child_ast(generator, self)
)
# move the function out of the closure
if parent.is_top_level:
generator.add_top_def(def_node.name, def_node)
return []
return [def_node]
class PyComplexExprNode(PyComplexNode):
def get_expressions(self):
rv = []
for i in self.children:
if hasattr(i, 'get_expression'):
rv.append(i.get_expression())
else:
rv.extend(i.get_expressions())
return rv
def generate_ast(self, generator, parent=None):
return self.generate_output_ast(self.get_expressions(),
generator, parent)
class PyBlockNode(PyComplexNode):
def __init__(self, name):
super(PyBlockNode, self).__init__()
self.name = name
def generate_ast(self, generator, parent):
is_extended = isinstance(parent, PyExtendsNode)
name = self.name
blockfunc_name = '__TK__block__%s' % name
position = getattr(name, 'position', (1, 0))
body = get_fragment_ast(
StringWithLocation(
'def %s():pass' % blockfunc_name,
position[0], position[1] - 4),
'exec'
)
def_node = body[0]
def_node.body = self.make_buffer_frame(
self.generate_child_ast(generator, self)
)
if not isinstance(name, str): # pragma: python2
name = name.encode('UTF-8')
generator.add_block(str(name), def_node, blockfunc_name)
if not is_extended:
# call the block in place
return self.generate_output_ast(
[simple_call(NameX(str(self.name)), [])],
self,
parent,
position=position
)
else:
return []
class PyWithNode(PyComplexNode):
def __init__(self, vars):
super(PyWithNode, self).__init__()
self.vars = vars
def generate_ast(self, generator, parent=None):
var_defs = get_fragment_ast(self.vars, 'exec')
body = var_defs + self.generate_child_ast(generator, self)
return self.generate_varscope(body)
class PyExtendsNode(PyComplexNode):
is_top_level = True
def __init__(self, href):
super(PyExtendsNode, self).__init__()
self.href = href
def generate_ast(self, generator, parent=None):
generator.make_extended_template(self.href)
return self.generate_child_ast(generator, self)
def ast_equals(tree1, tree2):
x1 = ast.dump(tree1)
x2 = ast.dump(tree2)
return x1 == x2
def coalesce_outputs(tree):
"""
Coalesce the constant output expressions
__output__('foo')
__output__('bar')
__output__(baz)
__output__('xyzzy')
into
__output__('foobar', baz, 'xyzzy')
"""
coalesce_all_outputs = True
if coalesce_all_outputs:
should_coalesce = lambda n: True
else:
should_coalesce = lambda n: n.output_args[0].__class__ is Str
class OutputCoalescer(NodeVisitor):
def visit(self, node):
# an if-else expression also has a body; it is not what we want, though.
if hasattr(node, 'body') and isinstance(node.body, Iterable):
# coalesce continuous string output nodes
new_body = []
output_node = None
def coalesce_strs():
if output_node:
output_node.value.args[:] = \
coalesce_strings(output_node.value.args)
for i in node.body:
if hasattr(i, 'output_args') and should_coalesce(i):
if output_node:
if len(output_node.value.args) + len(i.output_args) > 250:
coalesce_strs()
output_node = i
else:
output_node.value.args.extend(i.output_args)
continue
output_node = i
else:
coalesce_strs()
output_node = None
new_body.append(i)
coalesce_strs()
node.body[:] = new_body
NodeVisitor.visit(self, node)
def check(self, node):
"""
Coalesce __TK__output(__TK__escape(literal(x))) into
__TK__output(x).
"""
if not ast_equals(node.func, NameX('__TK__output')):
return
for i in range(len(node.args)):
arg1 = node.args[i]
if not arg1.__class__.__name__ == 'Call':
continue
if not ast_equals(arg1.func, NameX('__TK__escape')):
continue
if len(arg1.args) != 1:
continue
arg2 = arg1.args[0]
if not arg2.__class__.__name__ == 'Call':
continue
if not ast_equals(arg2.func, NameX('literal')):
continue
if len(arg2.args) != 1:
continue
node.args[i] = arg2.args[0]
def visit_Call(self, node):
self.check(node)
self.generic_visit(node)
OutputCoalescer().visit(tree)
def remove_locations(node):
"""
Removes locations from the given AST tree completely
"""
def fix(node):
if 'lineno' in node._attributes and hasattr(node, 'lineno'):
del node.lineno
if 'col_offset' in node._attributes and hasattr(node, 'col_offset'):
del node.col_offset
for child in iter_child_nodes(node):
fix(child)
fix(node)
class PyRootNode(PyComplexNode):
def __init__(self):
super(PyRootNode, self).__init__()
is_top_level = True
def generate_ast(self, generator, parent=None):
main_body = self.generate_child_ast(generator, self)
extended = generator.extended_href
toplevel_funcs = generator.blocks + generator.top_defs
# do not generate __main__ for extended templates
if not extended:
main_func = self.make_function('__main__', main_body,
add_buffer=True)
generator.add_bind_decorator(main_func)
toplevel_funcs = [main_func] + toplevel_funcs
# analyze the set of free variables
free_variables = set()
for i in toplevel_funcs:
fv_info = FreeVarFinder.for_ast(i)
free_variables.update(fv_info.get_free_variables())
# discard __TK__ variables, always builtin names True, False, None
# from free variables.
for i in list(free_variables):
if i.startswith('__TK__') or i in ALWAYS_BUILTINS:
free_variables.discard(i)
# discard the names of toplevel funcs from free variables
free_variables.difference_update(generator.top_level_names)
code = '__TK__mkbuffer = __TK__runtime.Buffer\n'
code += '__TK__escape = __TK__escape_g = __TK__runtime.escape\n'
code += '__TK__output_attrs = __TK__runtime.output_attrs\n'
if extended:
code += '__TK__parent_template = __TK__runtime.load(%r)\n' % \
extended
code += 'def __TK__binder(__TK__context):\n'
code += ' __TK__original_context = __TK__context.copy()\n'
code += ' __TK__bind = __TK__runtime.bind(__TK__context)\n'
code += ' __TK__bindblock = __TK__runtime.bind(__TK__context, ' \
'block=True)\n'
# bind gettext early!
for i in ['egettext']:
if i in free_variables:
free_variables.add('gettext')
free_variables.discard(i)
if 'gettext' in free_variables:
code += ' def egettext(msg):\n'
code += ' return __TK__escape(gettext(msg))\n'
code += ' gettext = __TK__context["gettext"]\n'
free_variables.discard('gettext')
code += ' raise\n' # a placeholder
if extended:
# an extended template does not have a __main__ (it is inherited)
code += ' __TK__parent_template.binder_func(__TK__context)\n'
for i in free_variables:
code += ' if "%s" in __TK__context:\n' % i
code += ' %s = __TK__context["%s"]\n' % (i, i)
code += ' return __TK__context\n'
tree = ast.parse(code)
remove_locations(tree)
class LocatorAndTransformer(ast.NodeTransformer):
binder = None
def visit_FunctionDef(self, node):
if node.name == '__TK__binder' and not self.binder:
self.binder = node
self.generic_visit(node)
return node
locator = LocatorAndTransformer()
locator.visit(tree)
# inject the other top level funcs in the binder
binder = locator.binder
for i, e in enumerate(binder.body):
if isinstance(e, Raise):
break
binder.body[i:i + 1] = toplevel_funcs
binder.body[i:i] = generator.imports
coalesce_outputs(tree)
return tree
# noinspection PyProtectedMember
class LocationMapper(object):
def __init__(self):
self.lineno_map = {1: 1}
self.prev_original_line = 1
self.prev_mapped_line = 1
self.prev_column = 0
def map_linenos(self, node):
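        # Walk the generated AST, assigning consecutive line numbers and recording
        # which original line each new number came from; the resulting map is
        # stored as Generator.lnotab, presumably so runtime tracebacks can be
        # translated back to template lines.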
if 'lineno' in node._attributes:
if hasattr(node, 'lineno'):
if node.lineno != self.prev_original_line:
self.prev_mapped_line += 1
self.lineno_map[self.prev_mapped_line] = node.lineno
self.prev_original_line = node.lineno
node.lineno = self.prev_mapped_line
if 'col_offset' in node._attributes:
if hasattr(node, 'col_offset'):
self.prev_column = node.col_offset
node.col_offset = self.prev_column
for child in iter_child_nodes(node):
self.map_linenos(child)
class Generator(BaseGenerator):
OutputNode = PyOutputNode
TranslatableOutputNode = PyTranslatableOutputNode
IfNode = PyIfNode
ForNode = PyForNode
DefineNode = PyDefineNode
ComplexExprNode = PyComplexExprNode
ExpressionNode = PyExpressionNode
ImportNode = PyImportNode
RootNode = PyRootNode
AttributeNode = PyAttributeNode
AttrsNode = PyAttrsNode
UnlessNode = PyUnlessNode
ExtendsNode = PyExtendsNode
BlockNode = PyBlockNode
CodeNode = PyCodeNode
WithNode = PyWithNode
def __init__(self, ir_tree):
super(Generator, self).__init__(ir_tree)
self.blocks = []
self.top_defs = []
self.top_level_names = set()
self.extended_href = None
self.imports = []
self.lnotab = None
def add_bind_decorator(self, func, block=True):
binder_call = NameX('__TK__bind' + ('block' if block else ''))
decors = [binder_call]
func.decorator_list = decors
def add_block(self, name, blockfunc, blockfunc_name):
self.top_level_names.add(blockfunc_name)
self.add_bind_decorator(blockfunc, block=True)
self.blocks.append(blockfunc)
def add_top_def(self, name, defblock):
self.top_level_names.add(name)
self.add_bind_decorator(defblock)
self.top_defs.append(defblock)
def add_top_level_import(self, name, node):
self.top_level_names.add(name)
self.imports.append(node)
def make_extended_template(self, href):
self.extended_href = href
def lnotab_info(self):
return self.lnotab
def generate_ast(self):
tree = super(Generator, self).generate_ast()
loc_mapper = LocationMapper()
loc_mapper.map_linenos(tree)
self.lnotab = loc_mapper.lineno_map
return tree
|
|
# -*- coding: utf-8 -*-
"""
Dialogs and classes for editing element properties.
@author: Chris Scott
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import logging
from PySide2 import QtGui, QtCore, QtWidgets
import numpy as np
from ...system.atoms import elements
from ...visutils.utilities import dataPath, iconPath
################################################################################
class ElementSettingsForm(QtWidgets.QGroupBox):
"""
Form for editing element settings
"""
settingsModified = QtCore.Signal(str)
def __init__(self, sym, parent=None):
self.sym = sym
self.name = elements.atomName(sym)
self.titleText = "%s - %s" % (sym, self.name)
super(ElementSettingsForm, self).__init__(self.titleText, parent=parent)
# form layout
layout = QtWidgets.QFormLayout(self)
self.setAlignment(QtCore.Qt.AlignHCenter)
# colour
rgb = copy.deepcopy(elements.RGB(sym))
col = QtGui.QColor(rgb[0]*255.0, rgb[1]*255.0, rgb[2]*255.0)
self.colour = rgb
# colour button
self.colourButton = QtWidgets.QPushButton("")
self.colourButton.setFixedWidth(50)
self.colourButton.setFixedHeight(30)
self.colourButton.setStyleSheet("QPushButton { background-color: %s }" % col.name())
self.colourButton.clicked.connect(self.showColourDialog)
layout.addRow("Colour", self.colourButton)
# radius
self.radius = elements.covalentRadius(sym)
# radius spin box
self.spinBox = QtWidgets.QDoubleSpinBox(self)
self.spinBox.setSingleStep(0.01)
self.spinBox.setMinimum(0.0)
self.spinBox.setMaximum(100.0)
self.spinBox.setValue(elements.covalentRadius(sym))
self.spinBox.valueChanged[float].connect(self.radiusChanged)
layout.addRow("Radius", self.spinBox)
def showColourDialog(self):
"""
Show colour dialog
"""
sym = self.sym
RGB = self.colour
cur = QtGui.QColor(RGB[0] * 255.0, RGB[1] * 255.0, RGB[2] * 255.0)
col = QtWidgets.QColorDialog.getColor(cur, self, "%s" % sym)
if col.isValid():
self.colourChanged(qtcolour=col)
self.settingsModified.emit(self.sym)
def colourChanged(self, qtcolour=None, colour=None):
"""
Colour changed
"""
if qtcolour is None and colour is None:
return
if qtcolour is not None:
colour = [float(qtcolour.red() / 255.0), float(qtcolour.green() / 255.0), float(qtcolour.blue() / 255.0)]
else:
qtcolour = QtGui.QColor(colour[0] * 255.0, colour[1] * 255.0, colour[2] * 255.0)
self.colour[0] = colour[0]
self.colour[1] = colour[1]
self.colour[2] = colour[2]
self.colourButton.setStyleSheet("QPushButton { background-color: %s }" % qtcolour.name())
def radiusChanged(self, val):
"""
Radius has changed
"""
self.radius = val
self.settingsModified.emit(self.sym)
################################################################################
class ElementEditor(QtWidgets.QDialog):
"""
Element editor dialog
"""
def __init__(self, parent=None):
super(ElementEditor, self).__init__(parent)
self.iniWinFlags = self.windowFlags()
self.setWindowFlags(self.iniWinFlags | QtCore.Qt.WindowStaysOnTopHint)
self.parent = parent
self.mainWindow = parent
self.setModal(0)
self.setWindowTitle("Element editor")
self.setWindowIcon(QtGui.QIcon(iconPath("other/periodic-table-icon.png")))
self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
# lattice objects
self.systemsDialog = self.mainWindow.systemsDialog
# initial settings
self.dirty = False
self.modifiedListApply = set()
self.modifiedListSave = set()
self.formsDict = {}
layout = QtWidgets.QVBoxLayout(self)
layout.setAlignment(QtCore.Qt.AlignHCenter)
# layout.setContentsMargins(0, 0, 0, 0)
# layout.setSpacing(0)
# combo box with elements
self.elementsCombo = QtWidgets.QComboBox()
self.elementsCombo.currentIndexChanged.connect(self.setWidgetStack)
row = QtWidgets.QHBoxLayout()
row.addStretch(1)
row.addWidget(self.elementsCombo)
row.addStretch(1)
layout.addLayout(row)
# stacked widget
self.stackedWidget = QtWidgets.QStackedWidget()
layout.addWidget(self.stackedWidget)
# populate combo and stacked widget
elementsList = elements.listElements()
for sym in elementsList:
# form for stacked widget
form = ElementSettingsForm(sym, parent=self)
form.settingsModified.connect(self.settingModified)
self.formsDict[sym] = form
# add to stacked widget
self.stackedWidget.addWidget(form)
# add to combo box
self.elementsCombo.addItem("%s - %s" % (sym, elements.atomName(sym)))
# buttons
buttonContainer = QtWidgets.QWidget(self)
buttonLayout = QtWidgets.QHBoxLayout(buttonContainer)
buttonLayout.setContentsMargins(0, 0, 0, 0)
buttonLayout.setSpacing(0)
# apply button
self.applyButton = QtWidgets.QPushButton(QtGui.QIcon(iconPath("redo_64.png")), "Apply")
self.applyButton.setStatusTip("Apply changes to current session")
self.applyButton.setToolTip("Apply changes to current session")
self.applyButton.clicked.connect(self.applyChanges)
self.applyButton.setEnabled(False)
self.saveButton = QtWidgets.QPushButton(QtGui.QIcon(iconPath("save_64.png")), "Save")
self.saveButton.setStatusTip("Save changes for use in future sessions")
self.saveButton.setToolTip("Save changes for use in future sessions")
self.saveButton.clicked.connect(self.saveChanges)
self.saveButton.setEnabled(False)
self.resetButton = QtWidgets.QPushButton(QtGui.QIcon(iconPath("undo_64.png")), "Reset")
self.resetButton.setStatusTip("Reset changes to last applied")
self.resetButton.setToolTip("Reset changes to last applied")
self.resetButton.clicked.connect(self.resetChanges)
self.resetButton.setEnabled(False)
buttonLayout.addWidget(self.applyButton)
buttonLayout.addWidget(self.saveButton)
buttonLayout.addWidget(self.resetButton)
layout.addWidget(buttonContainer)
def applyChanges(self):
"""
Apply changes.
"""
logger = logging.getLogger(__name__+".ElementEditor")
logger.debug("Applying element editor changes (%d)", len(self.modifiedListApply))
for sym in self.modifiedListApply:
settings = self.formsDict[sym]
# radius
radius = settings.radius
# colour
RGB = settings.colour
R = RGB[0]
G = RGB[1]
B = RGB[2]
logger.debug("Applying changes for '%s': rad %.3f; rgb <%.3f, %.3f, %.3f>", sym, radius, R, G, B)
latticeList = self.systemsDialog.getLatticeList()
for latt in latticeList:
# first modify the Lattice objects
if sym in latt.specieList:
index = latt.getSpecieIndex(sym)
# radius
latt.specieCovalentRadius[index] = radius
# RGB
latt.specieRGB[index][0] = R
latt.specieRGB[index][1] = G
latt.specieRGB[index][2] = B
# now modify elements structure
elements.updateCovalentRadius(sym, radius)
elements.updateRGB(sym, R, G, B)
# disable buttons
self.applyButton.setEnabled(False)
self.resetButton.setEnabled(False)
self.modifiedListApply.clear()
self.parent.setStatus("Element properties applied")
def saveChanges(self):
"""
Save changes
"""
logger = logging.getLogger(__name__+".ElementEditor")
logger.debug("Saving element editor changes (%d)", len(self.modifiedListSave))
msgtext = "This will overwrite the current element properties file. You should create a backup first!"
msgtext += "\nModified elements:"
for text in self.modifiedListSave:
msgtext += "\n%s" % text
msgtext += "\n\nDo you wish to continue?"
reply = QtWidgets.QMessageBox.question(self, "Message", msgtext, QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
# apply changes first
self.applyChanges()
# save to file
elements.write(dataPath("atoms.IN"))
# disable save button
self.saveButton.setEnabled(False)
self.modifiedListSave.clear()
self.parent.setStatus("Saved element properties")
def resetChanges(self):
"""
Reset changes.
"""
logger = logging.getLogger(__name__+".ElementEditor")
logger.debug("Resetting element editor changes (%d)", len(self.modifiedListApply))
for sym in self.modifiedListApply:
settings = self.formsDict[sym]
# radius
settings.spinBox.setValue(elements.covalentRadius(sym))
assert settings.radius == elements.covalentRadius(sym)
# colour
rgb = elements.RGB(sym)
settings.colourChanged(colour=rgb)
# disable buttons
self.applyButton.setEnabled(False)
self.resetButton.setEnabled(False)
self.parent.setStatus("Element properties reset")
def settingModified(self, elementText):
"""
Setting has been modified
"""
self.dirty = True
self.resetButton.setEnabled(True)
self.applyButton.setEnabled(True)
self.saveButton.setEnabled(True)
self.modifiedListApply.add(elementText)
self.modifiedListSave.add(elementText)
def setWidgetStack(self, index):
"""
Change stacked widget
"""
self.stackedWidget.setCurrentIndex(index)
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for scheduling."""
import sys
from oslo.config import cfg
from oslo.serialization import jsonutils
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import notifications
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
from nova import rpc
LOG = logging.getLogger(__name__)
scheduler_opts = [
cfg.IntOpt('scheduler_max_attempts',
default=3,
help='Maximum number of attempts to schedule an instance'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_opts)
def build_request_spec(ctxt, image, instances, instance_type=None):
"""Build a request_spec for the scheduler.
The request_spec assumes that all instances to be scheduled are the same
type.
"""
instance = instances[0]
if isinstance(instance, obj_base.NovaObject):
instance = obj_base.obj_to_primitive(instance)
if instance_type is None:
instance_type = flavors.extract_flavor(instance)
# NOTE(comstud): This is a bit ugly, but will get cleaned up when
# we're passing an InstanceType internal object.
extra_specs = db.flavor_extra_specs_get(ctxt, instance_type['flavorid'])
instance_type['extra_specs'] = extra_specs
request_spec = {
'image': image or {},
'instance_properties': instance,
'instance_type': instance_type,
'num_instances': len(instances),
# NOTE(alaski): This should be removed as logic moves from the
# scheduler to conductor. Provides backwards compatibility now.
'instance_uuids': [inst['uuid'] for inst in instances]}
return jsonutils.to_primitive(request_spec)
def set_vm_state_and_notify(context, service, method, updates, ex,
request_spec, db):
"""changes VM state and notifies."""
LOG.warning(_LW("Failed to %(service)s_%(method)s: %(ex)s"),
{'service': service, 'method': method, 'ex': ex})
vm_state = updates['vm_state']
properties = request_spec.get('instance_properties', {})
# NOTE(vish): We shouldn't get here unless we have a catastrophic
# failure, so just set all instances to error. if uuid
# is not set, instance_uuids will be set to [None], this
# is solely to preserve existing behavior and can
# be removed along with the 'if instance_uuid:' if we can
# verify that uuid is always set.
uuids = [properties.get('uuid')]
notifier = rpc.get_notifier(service)
for instance_uuid in request_spec.get('instance_uuids') or uuids:
if instance_uuid:
state = vm_state.upper()
LOG.warning(_LW('Setting instance to %s state.'), state,
instance_uuid=instance_uuid)
# update instance state and notify on the transition
(old_ref, new_ref) = db.instance_update_and_get_original(
context, instance_uuid, updates)
notifications.send_update(context, old_ref, new_ref,
service=service)
compute_utils.add_instance_fault_from_exc(context,
new_ref, ex, sys.exc_info())
payload = dict(request_spec=request_spec,
instance_properties=properties,
instance_id=instance_uuid,
state=vm_state,
method=method,
reason=ex)
event_type = '%s.%s' % (service, method)
notifier.error(context, event_type, payload)
def populate_filter_properties(filter_properties, host_state):
"""Add additional information to the filter properties after a node has
been selected by the scheduling process.
"""
if isinstance(host_state, dict):
host = host_state['host']
nodename = host_state['nodename']
limits = host_state['limits']
else:
host = host_state.host
nodename = host_state.nodename
limits = host_state.limits
# Adds a retry entry for the selected compute host and node:
_add_retry_host(filter_properties, host, nodename)
# Adds oversubscription policy
if not filter_properties.get('force_hosts'):
filter_properties['limits'] = limits
def populate_retry(filter_properties, instance_uuid):
max_attempts = _max_attempts()
force_hosts = filter_properties.get('force_hosts', [])
force_nodes = filter_properties.get('force_nodes', [])
if max_attempts == 1 or force_hosts or force_nodes:
# re-scheduling is disabled.
return
# retry is enabled, update attempt count:
retry = filter_properties.setdefault(
'retry', {
'num_attempts': 0,
'hosts': [] # list of compute hosts tried
})
retry['num_attempts'] += 1
_log_compute_error(instance_uuid, retry)
exc = retry.pop('exc', None)
if retry['num_attempts'] > max_attempts:
msg = (_('Exceeded max scheduling attempts %(max_attempts)d '
'for instance %(instance_uuid)s. '
'Last exception: %(exc)s')
% {'max_attempts': max_attempts,
'instance_uuid': instance_uuid,
'exc': exc})
raise exception.NoValidHost(reason=msg)
def _log_compute_error(instance_uuid, retry):
"""If the request contained an exception from a previous compute
build/resize operation, log it to aid debugging
"""
exc = retry.get('exc') # string-ified exception from compute
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host, last_node = hosts[-1]
LOG.error(_LE('Error from last host: %(last_host)s (node %(last_node)s):'
' %(exc)s'),
{'last_host': last_host,
'last_node': last_node,
'exc': exc},
instance_uuid=instance_uuid)
def _max_attempts():
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
raise exception.NovaException(_("Invalid value for "
"'scheduler_max_attempts', must be >= 1"))
return max_attempts
def _add_retry_host(filter_properties, host, node):
"""Add a retry entry for the selected compute node. In the event that
the request gets re-scheduled, this entry will signal that the given
node has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append([host, node])
def parse_options(opts, sep='=', converter=str, name=""):
"""Parse a list of options, each in the format of <key><sep><value>. Also
use the converter to convert the value into desired type.
:params opts: list of options, e.g. from oslo.config.cfg.ListOpt
:params sep: the separator
:params converter: callable object to convert the value, should raise
ValueError for conversion failure
:params name: name of the option
:returns: a lists of tuple of values (key, converted_value)
"""
good = []
bad = []
for opt in opts:
try:
key, seen_sep, value = opt.partition(sep)
value = converter(value)
except ValueError:
key = None
value = None
if key and seen_sep and value is not None:
good.append((key, value))
else:
bad.append(opt)
if bad:
LOG.warn(_LW("Ignoring the invalid elements of the option "
"%(name)s: %(options)s"),
{'name': name,
'options': ", ".join(bad)})
return good
def validate_filter(filter):
"""Validates that the filter is configured in the default filters."""
return filter in CONF.scheduler_default_filters
|
|
# Building parsed corpora for discourse analysis
# Author: Daniel McDonald
def dictmaker(path,
dictname,
query = 'any',
dictpath = 'data/dictionaries',
lemmatise = False,
just_content_words = False,
use_dependencies = False):
"""makes a pickle wordlist named dictname in dictpath"""
import os
import pickle
import re
import nltk
from time import localtime, strftime
from StringIO import StringIO
import shutil
from collections import Counter
from corpkit.progressbar import ProgressBar
from corpkit.other import tregex_engine
try:
from IPython.display import display, clear_output
except ImportError:
pass
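    # calling get_ipython().getoutput() with no arguments raises TypeError when
    # running inside IPython and NameError when get_ipython is undefined, which
    # is how the environment is detected below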
try:
get_ipython().getoutput()
except TypeError:
have_ipython = True
except NameError:
import subprocess
have_ipython = False
if lemmatise:
dictname = dictname + '-lemmatised'
if not dictname.endswith('.p'):
dictname = dictname + '.p'
# allow direct passing of dirs
path_is_list = False
one_big_corpus = False
if type(path) == str:
sorted_dirs = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path,d))]
# if no subcorpora, just do the dir passed in
if len(sorted_dirs) == 0:
one_big_corpus = True
sorted_dirs = [path]
elif type(path) == list:
path_is_list = True
sorted_dirs = sorted(path)
if type(sorted_dirs[0]) == int:
sorted_dirs = [str(d) for d in sorted_dirs]
try:
sorted_dirs.sort(key=int)
except:
pass
try:
if not os.path.exists(dictpath):
os.makedirs(dictpath)
except IOError:
print "Error making " + dictpath + "/ directory."
while os.path.isfile(os.path.join(dictpath, dictname)):
time = strftime("%H:%M:%S", localtime())
selection = raw_input('\n%s: %s already exists in %s.\n' \
' You have the following options:\n\n' \
' a) save with a new name\n' \
' b) delete %s\n' \
' c) exit\n\nYour selection: ' % (time, dictname, dictpath, os.path.join(dictpath, dictname)))
if 'a' in selection:
sel = raw_input('\nNew save name: ')
dictname = sel
if lemmatise:
dictname = dictname.replace('-lemmatised.p', '')
dictname = dictname + '-lemmatised'
if not dictname.endswith('.p'):
dictname = dictname + '.p'
elif 'b' in selection:
os.remove(os.path.join(dictpath, dictname))
elif 'c' in selection:
print ''
return
        else:
            print ' Choice "%s" not recognised.' % selection
time = strftime("%H:%M:%S", localtime())
print '\n%s: Extracting words from files ... \n' % time
# all this just to get a list of files and make a better progress bar
if use_dependencies:
counts = []
for d in sorted_dirs:
if not one_big_corpus:
subcorpus = os.path.join(path, d)
else:
subcorpus = path
if use_dependencies:
files = [f for f in os.listdir(subcorpus) if f.endswith('.xml')]
else:
files = [f for f in os.listdir(subcorpus)]
counts.append(len(files))
num_files = sum(counts)
c = 0
p = ProgressBar(num_files)
else:
p = ProgressBar(len(sorted_dirs))
def tokener(xmldata):
"""print word, using good lemmatisation"""
from bs4 import BeautifulSoup
import gc
open_classes = ['N', 'V', 'R', 'J']
result = []
just_good_deps = SoupStrainer('tokens')
soup = BeautifulSoup(xmldata, parse_only=just_good_deps)
for token in soup.find_all('token'):
word = token.word.text
query = re.compile(r'.*')
if re.search(query, word):
if lemmatise:
word = token.lemma.text
if just_content_words:
if not token.pos.text[0] in open_classes:
continue
result.append(word)
# attempt to stop memory problems.
# not sure if this helps, though:
soup.decompose()
soup = None
data = None
gc.collect()
return result
# translate 'any' query
if query == 'any':
if lemmatise:
query = r'__ <# (__ !< __)'
else:
query = r'__ !< __'
if lemmatise:
options = ['-o']
else:
options = ['-t', '-o']
if use_dependencies:
from bs4 import BeautifulSoup, SoupStrainer
if query == 'any':
query = r'.*'
query = re.compile(query)
allwords = []
for index, d in enumerate(sorted_dirs):
if not use_dependencies:
p.animate(index)
if not path_is_list:
if len(sorted_dirs) == 1:
subcorp = d
else:
subcorp = os.path.join(path, d)
else:
subcorp = d
# check query first time through
if not use_dependencies:
if index == 0:
trees_found = tregex_engine(corpus = subcorp, check_for_trees = True)
if not trees_found:
lemmatise = False
dictname = dictname.replace('-lemmatised', '')
if trees_found:
results = tregex_engine(corpus = subcorp, options = options, query = query,
lemmatise = lemmatise,
just_content_words = just_content_words)
for result in results:
allwords.append(result)
elif use_dependencies:
regex_nonword_filter = re.compile("[A-Za-z]")
results = []
fs = [os.path.join(subcorp, f) for f in os.listdir(subcorp)]
for f in fs:
p.animate(c, str(c) + '/' + str(num_files))
c += 1
data = open(f).read()
result_from_a_file = tokener(data)
for w in result_from_a_file:
if re.search(regex_nonword_filter, w):
allwords.append(w.lower())
if not use_dependencies:
if not trees_found:
for f in os.listdir(subcorp):
raw = unicode(open(os.path.join(subcorp, f)).read(), 'utf-8', errors = 'ignore')
sent_tokenizer=nltk.data.load('tokenizers/punkt/english.pickle')
sents = sent_tokenizer.tokenize(raw)
tokenized_sents = [nltk.word_tokenize(i) for i in sents]
for sent in tokenized_sents:
for w in sent:
allwords.append(w.lower())
#100%
p.animate(len(sorted_dirs))
# make a dict
dictionary = Counter(allwords)
with open(os.path.join(dictpath, dictname), 'wb') as handle:
pickle.dump(dictionary, handle)
time = strftime("%H:%M:%S", localtime())
print '\n\n' + time + ': Done! ' + dictname + ' created in ' + dictpath + '/'
def get_urls(url, criteria = False, remove = True):
"""Get a list of all urls within an html document"""
from bs4 import BeautifulSoup
    import urllib
    # note: the original relied on an IPython/BeautifulSoup display helper to fetch
    # and render the page; fetching and parsing directly is assumed to be equivalent
    html = urllib.urlopen(url).read()
    soup = BeautifulSoup(html)
urls = []
if criteria:
import re
regex = re.compile(criteria)
for link in soup.find_all('a'):
a_url = link.get('href')
if a_url.startswith('#'):
continue
if a_url.startswith('/'):
a_url = url + a_url
if criteria:
if re.search(regex, a_url):
if not remove:
urls.append(a_url)
else:
if remove:
urls.append(a_url)
else:
urls.append(a_url)
urls.sort()
filtered_urls = filter(None, urls)
unique_urls = sorted(set(filtered_urls))
return unique_urls
def downloader(url_list, new_path = 'html', wait = 5):
"""download a bunch of urls and store in a local folder"""
import urllib
import time
import os
from time import localtime, strftime
from corpkit.progressbar import ProgressBar
thetime = strftime("%H:%M:%S", localtime())
print "\n%s: Attempting to download %d URLs with %d seconds wait-time ... \n" % (thetime, len(url_list), wait)
    p = ProgressBar(len(url_list))
if not os.path.exists(new_path):
os.makedirs(new_path)
paths = []
for index, url in enumerate(url_list):
p.animate(index)
base = os.path.basename(url)
new_filename = os.path.join(new_path, base)
paths.append(new_filename)
urllib.urlretrieve(url, new_filename)
time.sleep(wait)
p.animate(len(url_list))
num_downloaded = len(paths)
thetime = strftime("%H:%M:%S", localtime())
print '\n\n%s: Done! %d files downloaded.' % (thetime, num_downloaded)
return paths
def simple_text_extractor(html, stopwords = 'English'):
"""extract text from html/xml files using justext"""
import requests
import justext
import os
import copy
# if on hard disk:
if type(html) != list:
html_files = [copy.deepcopy(html)]
else:
html_files = copy.deepcopy(html)
output = []
for html in html_files:
if os.path.isfile(html):
f = open(html)
raw_html_text = f.read()
# if it's a web address
elif html.startswith('http'):
response = requests.get(html)
raw_html_text = response.content
# if it's already html text:
else:
raw_html_text = copy.deepcopy(html)
paragraphs = justext.justext(raw_html_text, justext.get_stoplist(stopwords))
text = []
for paragraph in paragraphs:
if not paragraph.is_boilerplate:
text.append(paragraph.text)
text = '\n'.join(text)
metadata = os.path.basename(html)
tup = (text, metadata)
output.append(tup)
return output
def practice_run(path_to_html_file):
import os
import warnings
from bs4 import BeautifulSoup
if type(path_to_html_file) == list:
path_to_html_file = str(path_to_html_file[0])
f = open(path_to_html_file)
raw = f.read()
soup = BeautifulSoup(raw)
    try:
        text = get_text(soup)
    except:
        warnings.warn('get_text function not defined. Using simple_text_extractor instead.')
        from corpkit.build import simple_text_extractor
        # simple_text_extractor returns a list of (text, metadata) tuples
        text = simple_text_extractor(path_to_html_file)[0][0]
try:
metadata = get_metadata(soup)
except:
warnings.warn('get_metadata function not defined. Using filename as metadata.')
metadata = os.path.basename(path_to_html_file)
print 'text: %s\n\nmetadata: %s' %(text, metadata)
def souper(corpus_path):
"""The aim is to make a tuple of (text, metadata)"""
import os
from bs4 import BeautifulSoup
for root, dirs, files in os.walk(corpus_path, topdown=True):
for name in files:
filepath = os.path.join(root, name)
f = open(filepath)
raw = f.read()
soup = BeautifulSoup(raw)
def correctspelling(path, newpath):
"""Feed this function an unstructured corpus and get a version with corrected spelling"""
    import enchant
    import codecs
    import os
    import re
    subdirs = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))]
for subdir in subdirs:
txtFiles = [f for f in os.listdir(os.path.join(path,subdir)) if f.endswith(".txt")]
print 'Doing ' + subdir + ' ...'
for txtFile in txtFiles:
d = enchant.Dict("en_UK")
try:
f = codecs.open(os.path.join(path,subdir,txtFile), "r", "utf-8")
except IOError:
print "Error reading the file, right filepath?"
return
textdata = f.read()
textdata = unicode(textdata, 'utf-8')
mispelled = [] # empty list. Gonna put mispelled words in here
words = textdata.split()
for word in words:
# if spell check failed and the word is also not in
# our mis-spelled list already, then add the word
if d.check(word) == False and word not in mispelled:
mispelled.append(word)
# print mispelled
            for mspellword in mispelled:
                #get suggestions
                suggestions = d.suggest(mspellword)
                #make sure we actually got some
                if len(suggestions) > 0:
                    # pick the first one
                    picksuggestion = suggestions[0]
                    # '\b' in a plain string is a backspace character, not a word
                    # boundary, so replace whole words with a regular expression
                    textdata = re.sub(r'\b%s\b' % re.escape(mspellword), picksuggestion, textdata)
try:
if not os.path.exists(newpath):
os.makedirs(newpath)
fo=open(os.path.join(newpath, txtFile), "w")
except IOError:
print "Error"
return
fo.write(textdata.encode("UTF-8"))
fo.close()
return
def stanford_parse(corpus_path):
"""Parse a directory (recursively) with the Stanford parser..."""
import os
import ast
try:
from corenlp import StanfordCoreNLP
except:
raise ValueError("CoreNLP not installed.")
path_part, corpus_name = os.path.split(corpus_path)
new_corpus_folder = 'parsed_%s' % corpus_name
new_corpus_path = os.path.join(path_part, new_corpus_folder)
if not os.path.exists(new_corpus_path):
os.makedirs(new_corpus_path)
corenlp = StanfordCoreNLP()
files = os.listdir(corpus_path)
for root, dirs, files in os.walk(corpus_path, topdown=True):
for name in files:
filepath = os.path.join(root, name)
f = open(filepath)
raw = f.read()
parsed_text = ast.literal_eval(corenlp.parse(raw))
for index, sent in enumerate(parsed_text['sentences']):
syntax_tree = sent['parsetree']
plain_text = sent['text']
subcorpus_path = os.path.join(new_corpus_path, subcorpus_name)
if not os.path.exists(subcorpus_path):
os.makedirs(subcorpus_path)
def structure_corpus(path_to_files, new_corpus_name = 'structured_corpus'):
"""structure a corpus in some kind of sequence"""
import os
import shutil
base = os.path.basename(path_to_files)
new_corpus_name = 'structured_' + base
if not os.path.isdir(path_to_files):
raise ValueError('Directory not found: %s' % path_to_files)
if not os.path.exists(new_corpus_name):
os.makedirs(new_corpus_name)
files = os.listdir(path_to_files)
for f in files:
filepath = os.path.join(path_to_files, f)
subcorpus_name = 'what goes here?'
subcorpus_path = os.path.join(new_corpus_name, subcorpus_name)
if not os.path.exists(subcorpus_path):
os.makedirs(subcorpus_path)
shutil.copy(filepath, subcorpus_path)
print 'Done!'
def edit_metadata():
return edited
def stanford_parse(data, corpus_name = 'corpus'):
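    """Parse a list of (text, metadata) tuples with CoreNLP and write the raw
    text and parse trees to corpus_name/<metadata>/<index>.txt files."""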
from time import localtime, strftime
thetime = strftime("%H:%M:%S", localtime())
print "\n%s: Initialising CoreNLP... \n" % thetime
import os
import ast
try:
from corenlp import StanfordCoreNLP
except:
raise ValueError("CoreNLP not installed.")
from corpkit.progressbar import ProgressBar
corenlp = StanfordCoreNLP()
if not os.path.exists(corpus_name):
os.makedirs(corpus_name)
p = ProgressBar(len(data))
for index, datum in enumerate(data):
p.animate(index)
text = datum[0]
metadata = datum[1]
number_of_zeroes = len(str(len(data))) - 1
filename = str(index).zfill(number_of_zeroes) + '.txt'
file_data = []
parsed_text = ast.literal_eval(corenlp.parse(text))
trees = []
raw_texts = []
for index, sent in enumerate(parsed_text['sentences']):
syntax_tree = sent['parsetree']
plain_text = sent['text']
trees.append(syntax_tree)
raw_texts.append(plain_text)
#subcorpus_path = os.path.join(new_corpus_path, subcorpus_name)
file_data = ['<raw>' + '\n'.join(raw_texts) + '\n</raw>', '<parse>' + '\n'.join(trees) + '\n</parse>', ]
if not os.path.exists(os.path.join(corpus_name, metadata)):
os.makedirs(os.path.join(corpus_name, metadata))
try:
fo=open(os.path.join(corpus_name, metadata, filename),"w")
except IOError:
print "Error writing file."
fo.write('\n'.join(file_data))
fo.close()
p.animate(len(data))
print 'Done!'
|
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.layout_tests.controllers import repaint_overlay
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
def write_test_result(filesystem, port, results_directory, test_name, driver_output,
expected_driver_output, failures):
"""Write the test result to the result output directory."""
root_output_dir = results_directory
writer = TestResultWriter(filesystem, port, root_output_dir, test_name)
if driver_output.error:
writer.write_stderr(driver_output.error)
for failure in failures:
# FIXME: Instead of this long 'if' block, each failure class might
# have a responsibility for writing a test result.
if isinstance(failure, (test_failures.FailureMissingResult,
test_failures.FailureTextMismatch,
test_failures.FailureTestHarnessAssertion)):
writer.write_text_files(driver_output.text, expected_driver_output.text)
writer.create_text_diff_and_write_result(driver_output.text, expected_driver_output.text)
elif isinstance(failure, test_failures.FailureMissingImage):
writer.write_image_files(driver_output.image, expected_image=None)
elif isinstance(failure, test_failures.FailureMissingImageHash):
writer.write_image_files(driver_output.image, expected_driver_output.image)
elif isinstance(failure, test_failures.FailureImageHashMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
writer.write_image_diff_files(driver_output.image_diff)
elif isinstance(failure, (test_failures.FailureAudioMismatch,
test_failures.FailureMissingAudio)):
writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
elif isinstance(failure, test_failures.FailureCrash):
crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output
writer.write_crash_log(crashed_driver_output.crash_log)
elif isinstance(failure, test_failures.FailureLeak):
writer.write_leak_log(driver_output.leak_log)
elif isinstance(failure, test_failures.FailureReftestMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
# FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
# FIXME: We should always have 2 images here.
if driver_output.image and expected_driver_output.image:
diff_image, _ = port.diff_image(expected_driver_output.image, driver_output.image)
if diff_image:
writer.write_image_diff_files(diff_image)
else:
_log.warning('ref test mismatch did not produce an image diff.')
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warning("reference %s was not found", failure.reference_filename)
elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warning("reference %s was not found", failure.reference_filename)
else:
assert isinstance(failure, (test_failures.FailureTimeout, test_failures.FailureReftestNoImagesGenerated))
if expected_driver_output is not None:
writer.create_repaint_overlay_result(driver_output.text, expected_driver_output.text)
def baseline_name(filesystem, test_name, suffix):
base = filesystem.splitext(test_name)[0]
return '%s%s.%s' % (base, TestResultWriter.FILENAME_SUFFIX_EXPECTED, suffix)
class TestResultWriter(object):
"""A class which handles all writing operations to the result directory."""
# Filename pieces when writing failures to the test results directory.
FILENAME_SUFFIX_ACTUAL = "-actual"
FILENAME_SUFFIX_EXPECTED = "-expected"
FILENAME_SUFFIX_DIFF = "-diff"
FILENAME_SUFFIX_STDERR = "-stderr"
FILENAME_SUFFIX_CRASH_LOG = "-crash-log"
FILENAME_SUFFIX_SAMPLE = "-sample"
FILENAME_SUFFIX_LEAK_LOG = "-leak-log"
FILENAME_SUFFIX_WDIFF = "-wdiff.html"
FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
FILENAME_SUFFIX_IMAGE_DIFFS_HTML = "-diffs.html"
FILENAME_SUFFIX_OVERLAY = "-overlay.html"
def __init__(self, filesystem, port, root_output_dir, test_name):
self._filesystem = filesystem
self._port = port
self._root_output_dir = root_output_dir
self._test_name = test_name
def _make_output_directory(self):
"""Creates the output directory (if needed) for a given test filename."""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
fs.maybe_make_directory(fs.dirname(output_filename))
def output_filename(self, modifier):
"""Returns a filename inside the output dir that contains modifier.
For example, if test name is "fast/dom/foo.html" and modifier is "-expected.txt",
the return value is "/<path-to-root-output-dir>/fast/dom/foo-expected.txt".
Args:
modifier: a string to replace the extension of filename with
Return:
The absolute path to the output filename
"""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
return fs.splitext(output_filename)[0] + modifier
def _write_file(self, path, contents):
if contents is not None:
self._make_output_directory()
self._filesystem.write_binary_file(path, contents)
def _output_testname(self, modifier):
fs = self._filesystem
return fs.splitext(fs.basename(self._test_name))[0] + modifier
def write_output_files(self, file_type, output, expected):
"""Writes the test output, the expected output in the results directory.
The full output filename of the actual, for example, will be
<filename>-actual<file_type>
For instance,
my_test-actual.txt
Args:
file_type: A string describing the test output file type, e.g. ".txt"
output: A string containing the test output
expected: A string containing the expected test output
"""
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
self._write_file(actual_filename, output)
self._write_file(expected_filename, expected)
def write_stderr(self, error):
filename = self.output_filename(self.FILENAME_SUFFIX_STDERR + ".txt")
self._write_file(filename, error)
def write_crash_log(self, crash_log):
filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
self._write_file(filename, crash_log.encode('utf8', 'replace'))
def write_leak_log(self, leak_log):
filename = self.output_filename(self.FILENAME_SUFFIX_LEAK_LOG + ".txt")
self._write_file(filename, leak_log)
def copy_sample_file(self, sample_file):
filename = self.output_filename(self.FILENAME_SUFFIX_SAMPLE + ".txt")
self._filesystem.copyfile(sample_file, filename)
def write_text_files(self, actual_text, expected_text):
self.write_output_files(".txt", actual_text, expected_text)
def create_text_diff_and_write_result(self, actual_text, expected_text):
# FIXME: This function is actually doing the diffs as well as writing results.
# It might be better to extract code which does 'diff' and make it a separate function.
if not actual_text or not expected_text:
return
file_type = '.txt'
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
# We treat diff output as binary. Diff output may contain multiple files
# in conflicting encodings.
diff = self._port.diff_text(expected_text, actual_text, expected_filename, actual_filename)
diff_filename = self.output_filename(self.FILENAME_SUFFIX_DIFF + file_type)
self._write_file(diff_filename, diff)
# Shell out to wdiff to get colored inline diffs.
if self._port.wdiff_available():
wdiff = self._port.wdiff_text(expected_filename, actual_filename)
wdiff_filename = self.output_filename(self.FILENAME_SUFFIX_WDIFF)
self._write_file(wdiff_filename, wdiff)
# Use WebKit's PrettyPatch.rb to get an HTML diff.
if self._port.pretty_patch_available():
pretty_patch = self._port.pretty_patch_text(diff_filename)
pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)
self._write_file(pretty_patch_filename, pretty_patch)
def create_repaint_overlay_result(self, actual_text, expected_text):
html = repaint_overlay.generate_repaint_overlay_html(self._test_name, actual_text, expected_text)
if html:
overlay_filename = self.output_filename(self.FILENAME_SUFFIX_OVERLAY)
self._write_file(overlay_filename, html)
def write_audio_files(self, actual_audio, expected_audio):
self.write_output_files('.wav', actual_audio, expected_audio)
def write_image_files(self, actual_image, expected_image):
self.write_output_files('.png', actual_image, expected_image)
def write_image_diff_files(self, image_diff):
diff_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFF)
self._write_file(diff_filename, image_diff)
diffs_html_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFFS_HTML)
# FIXME: old-run-webkit-tests shows the diff percentage as the text contents of the "diff" link.
# FIXME: old-run-webkit-tests include a link to the test file.
html = """<!DOCTYPE HTML>
<html>
<head>
<title>%(title)s</title>
<style>.label{font-weight:bold}</style>
</head>
<body>
Difference between images: <a href="%(diff_filename)s">diff</a><br>
<div class=imageText></div>
<div class=imageContainer data-prefix="%(prefix)s">Loading...</div>
<script>
(function() {
var preloadedImageCount = 0;
function preloadComplete() {
++preloadedImageCount;
if (preloadedImageCount < 2)
return;
toggleImages();
setInterval(toggleImages, 2000)
}
function preloadImage(url) {
image = new Image();
image.addEventListener('load', preloadComplete);
image.src = url;
return image;
}
function toggleImages() {
if (text.textContent == 'Expected Image') {
text.textContent = 'Actual Image';
container.replaceChild(actualImage, container.firstChild);
} else {
text.textContent = 'Expected Image';
container.replaceChild(expectedImage, container.firstChild);
}
}
var text = document.querySelector('.imageText');
var container = document.querySelector('.imageContainer');
var actualImage = preloadImage(container.getAttribute('data-prefix') + '-actual.png');
var expectedImage = preloadImage(container.getAttribute('data-prefix') + '-expected.png');
})();
</script>
</body>
</html>
""" % {
'title': self._test_name,
'diff_filename': self._output_testname(self.FILENAME_SUFFIX_IMAGE_DIFF),
'prefix': self._output_testname(''),
}
self._write_file(diffs_html_filename, html)
def write_reftest(self, src_filepath):
fs = self._filesystem
dst_dir = fs.dirname(fs.join(self._root_output_dir, self._test_name))
dst_filepath = fs.join(dst_dir, fs.basename(src_filepath))
self._write_file(dst_filepath, fs.read_binary_file(src_filepath))
|
|
#!/usr/bin/python
from vm import VM
from vmhost import VMhost
import testcases
def case_simple_swap():
testcases.utils.create_vmhosts(2, 'x86_64', 4096)
vm1 = VM('vm1', 'x86_64', 256)
vm2 = VM('vm2', 'x86_64', 256)
stateA = {
'host1' : [ vm1 ],
'host2' : [ vm2 ],
}
stateB = {
'host1' : [ vm2 ],
'host2' : [ vm1 ],
}
expected_path = """\
shutdown:
! vm1: host1 -> host2 cost 256
! vm2: host2 -> host1 cost 256
provision:
"""
return (stateA, stateB, expected_path)
def case_simple_cessation():
testcases.utils.create_vmhosts(3, 'x86_64', 4096)
vm1 = VM('vm1', 'x86_64', 3256)
vm2 = VM('vm2', 'x86_64', 3256)
stateA = {
'host1' : [ vm1 ],
'host2' : [ vm2 ],
'host3' : [ ],
}
stateB = {
'host1' : [ vm2 ],
'host2' : [ ],
'host3' : [ vm1 ],
}
expected_path = """\
shutdown:
! vm1: host1 -> host3 cost 3256
! vm2: host2 -> host1 cost 3256
provision:
"""
return (stateA, stateB, expected_path)
def case_swap_with_one_temp():
testcases.utils.create_vmhosts(3, 'x86_64', 4096)
vm1 = VM('vm1', 'x86_64', 3256)
vm2 = VM('vm2', 'x86_64', 3256)
stateA = {
'host1' : [ vm1 ],
'host2' : [ vm2 ],
'host3' : [ ],
}
stateB = {
'host1' : [ vm2 ],
'host2' : [ vm1 ],
'host3' : [ ],
}
expected_path = """\
shutdown:
! vm2: host2 -> host3 cost 3256
! vm1: host1 -> host2 cost 3256
! vm2: host3 -> host1 cost 3256
provision:
"""
return (stateA, stateB, expected_path)
def case_complex_swap():
testcases.utils.create_vmhosts(3, 'x86_64', 4096)
vm1 = VM('vm1', 'x86_64', 300)
vm2 = VM('vm2', 'x86_64', 3000)
vm3 = VM('vm3', 'x86_64', 3700)
stateA = {
'host1' : [ vm1 ],
'host2' : [ vm2 ],
'host3' : [ vm3 ],
}
stateB = {
'host1' : [ vm1 ],
'host2' : [ vm3 ],
'host3' : [ vm2 ],
}
expected_path = """\
shutdown:
! vm2: host2 -> host1 cost 3000
! vm3: host3 -> host2 cost 3700
! vm2: host1 -> host3 cost 3000
provision:
"""
return (stateA, stateB, expected_path)
def case_complex_pair_swap():
testcases.utils.create_vmhosts(2, 'x86_64', 4096)
vm1 = VM('vm1', 'x86_64', 1645)
vm2 = VM('vm2', 'x86_64', 2049)
vm3 = VM('vm3', 'x86_64', 459)
vm4 = VM('vm4', 'x86_64', 222)
stateA = {
'host1' : [ vm3, vm4 ],
'host2' : [ vm1, vm2 ],
}
stateB = {
'host1' : [ vm1, vm2 ],
'host2' : [ vm3, vm4 ],
}
expected_path = """\
shutdown:
! vm1: host2 -> host1 cost 1645
! vm3: host1 -> host2 cost 459
! vm4: host1 -> host2 cost 222
! vm2: host2 -> host1 cost 2049
provision:
"""
return (stateA, stateB, expected_path)
def case_shutdown_and_swap():
host1 = VMhost('host1', 'x86_64', 4096)
host2 = VMhost('host2', 'x86_64', 3048)
host3 = VMhost('host3', 'i386' , 4096)
host4 = VMhost('host4', 'i386' , 2448)
vm1 = VM('vm1', 'x86_64', 2048)
vm2 = VM('vm2', 'x86_64', 1024)
vm3 = VM('vm3', 'x86_64', 1024)
vm4 = VM('vm4', 'x86_64', 512)
vm5 = VM('vm5', 'i386', 1024)
vm6 = VM('vm6', 'i386', 1024)
vm7 = VM('vm7', 'i386', 768)
vm8 = VM('vm8', 'i386', 512)
vm9 = VM('vm9', 'i386', 256)
stateA = {
'host1' : [ vm1, vm2 ],
'host2' : [ vm3, vm4, vm9 ],
'host3' : [ vm7, vm8 ],
'host4' : [ vm5, vm6 ]
}
# swap vm5 and vm9
stateB = {
'host1' : [ vm1 ],
'host2' : [ vm3, vm4, vm5 ],
'host3' : [ ],
'host4' : [ vm9 ]
}
expected_path = """\
shutdown: vm2, vm6, vm7, vm8
! vm9: host2 -> host4 cost 256
! vm5: host4 -> host2 cost 1024
provision:
"""
return (stateA, stateB, expected_path)
def case_tricky():
# N.B. 256MB required for dom0 (hardcoded).
# For the sake of easy maths in the example
# we create 2000MB available for domUs.
host1 = VMhost('host1', 'x86_64', 2256)
host2 = VMhost('host2', 'x86_64', 2256)
host3 = VMhost('host3', 'i386', 2256)
vm1 = VM('vm1', 'x86_64', 1000)
vm2 = VM('vm2', 'x86_64', 1000)
# vm3 = VM('vm3', 'i386', 900)
vm3 = VM('vm3', 'x86_64', 900)
vm4 = VM('vm4', 'i386' , 900)
vm5 = VM('vm5', 'i386' , 150)
vm6 = VM('vm6', 'i386' , 150)
stateA = {
'host1' : [ vm1, vm3 ],
'host2' : [ vm2, vm4 ],
'host3' : [ vm5, vm6 ],
}
stateB = {
'host1' : [ vm1, vm2 ],
'host2' : [ vm3, vm4, vm5 ],
'host3' : [ ],
}
expected_path = """\
shutdown: vm6
! vm4: host2 -> host3 cost 900
! vm3: host1 -> host2 cost 900
! vm2: host2 -> host1 cost 1000
! vm4: host3 -> host2 cost 900
! vm5: host3 -> host2 cost 150
provision:
"""
return (stateA, stateB, expected_path)
def case_chain4():
# N.B. 256MB required for dom0 (hardcoded).
# For the sake of easy maths in the example
# we create 2000MB available for domUs.
testcases.utils.create_vmhosts(5, 'x86_64', 1256)
big1 = VM('big1', 'x86_64', 500)
big2 = VM('big2', 'x86_64', 510)
big3 = VM('big3', 'x86_64', 520)
big4 = VM('big4', 'x86_64', 530)
small1 = VM('small1', 'x86_64', 370)
small2 = VM('small2', 'x86_64', 380)
small3 = VM('small3', 'x86_64', 390)
small4 = VM('small4', 'x86_64', 400)
tiny1 = VM('tiny1', 'x86_64', 100)
tiny2 = VM('tiny2', 'x86_64', 100)
tiny3 = VM('tiny3', 'x86_64', 100)
tiny4 = VM('tiny4', 'x86_64', 100)
stateA = {
'host1' : [ big1, small1 ],
'host2' : [ big2, small2 ],
'host3' : [ big3, small3 ],
'host4' : [ big4, small4 ],
'host5' : [ tiny1, tiny2, tiny3, tiny4 ],
}
stateB = {
'host1' : [ big1, small4, tiny1 ],
'host2' : [ big2, small3, tiny2 ],
'host3' : [ big3, small2, tiny3 ],
'host4' : [ big4, small1, tiny4 ],
'host5' : [ ],
}
expected_path = """\
shutdown:
! big1: host1 -> host5 cost 500
! small4: host4 -> host1 cost 400
! small1: host1 -> host4 cost 370
! big1: host5 -> host1 cost 500
! big2: host2 -> host5 cost 510
! small3: host3 -> host2 cost 390
! small2: host2 -> host3 cost 380
! big2: host5 -> host2 cost 510
! tiny1: host5 -> host1 cost 100
! tiny2: host5 -> host2 cost 100
! tiny3: host5 -> host3 cost 100
! tiny4: host5 -> host4 cost 100
provision:
"""
return (stateA, stateB, expected_path)
def case_chain6():
# N.B. 256MB required for dom0 (hardcoded).
# For the sake of easy maths in the example
# we create 1000MB available for domUs.
host1 = VMhost('host1', 'x86_64', 1256)
host2 = VMhost('host2', 'x86_64', 1256)
host3 = VMhost('host3', 'x86_64', 1256)
host4 = VMhost('host4', 'x86_64', 1256)
host5 = VMhost('host5', 'x86_64', 1256)
host6 = VMhost('host6', 'x86_64', 1256)
host7 = VMhost('host7', 'x86_64', 1256)
big1 = VM('big1', 'x86_64', 500)
big2 = VM('big2', 'x86_64', 510)
big3 = VM('big3', 'x86_64', 520)
big4 = VM('big4', 'x86_64', 530)
big5 = VM('big5', 'x86_64', 540)
big6 = VM('big6', 'x86_64', 550)
small1 = VM('small1', 'x86_64', 350)
small2 = VM('small2', 'x86_64', 360)
small3 = VM('small3', 'x86_64', 370)
small4 = VM('small4', 'x86_64', 380)
small5 = VM('small5', 'x86_64', 390)
small6 = VM('small6', 'x86_64', 400)
tiny1 = VM('tiny1', 'x86_64', 100)
tiny2 = VM('tiny2', 'x86_64', 100)
tiny3 = VM('tiny3', 'x86_64', 100)
tiny4 = VM('tiny4', 'x86_64', 100)
tiny5 = VM('tiny5', 'x86_64', 100)
tiny6 = VM('tiny6', 'x86_64', 100)
stateA = {
'host1' : [ big1, small1 ],
'host2' : [ big2, small2 ],
'host3' : [ big3, small3 ],
'host4' : [ big4, small4 ],
'host5' : [ big5, small5 ],
'host6' : [ big6, small6 ],
'host7' : [ tiny1, tiny2, tiny3, tiny4 ],
}
stateB = {
'host1' : [ big1, small6, tiny1 ],
'host2' : [ big2, small5, tiny2 ],
'host3' : [ big3, small4, tiny3 ],
'host4' : [ big4, small3, tiny4 ],
'host5' : [ big5, small2, tiny5 ],
'host6' : [ ],
'host7' : [ ],
}
expected_path = """\
shutdown: big6, small1
! big2: host2 -> host7 cost 510
! small5: host5 -> host2 cost 390
! small2: host2 -> host5 cost 360
! big2: host7 -> host2 cost 510
! big3: host3 -> host7 cost 520
! small4: host4 -> host3 cost 380
! small3: host3 -> host4 cost 370
! big3: host7 -> host3 cost 520
! small6: host6 -> host1 cost 400
! tiny1: host7 -> host1 cost 100
! tiny2: host7 -> host2 cost 100
! tiny3: host7 -> host3 cost 100
! tiny4: host7 -> host4 cost 100
provision: tiny5
"""
return (stateA, stateB, expected_path)
def case_simple_deadlock():
testcases.utils.create_vmhosts(2, 'x86_64', 4096)
vm1 = VM('vm1', 'x86_64', 3256)
vm2 = VM('vm2', 'x86_64', 3256)
stateA = {
'host1' : [ vm1 ],
'host2' : [ vm2 ],
}
stateB = {
'host1' : [ vm2 ],
'host2' : [ vm1 ],
}
expected_path = None
return (stateA, stateB, expected_path)
def case_weird():
testcases.utils.create_vmhosts(3, 'x86_64', 4096, 300)
vm1 = VM('vm1', 'x86_64', 892)
vm2 = VM('vm2', 'x86_64', 2542)
vm3 = VM('vm3', 'x86_64', 3039)
vm4 = VM('vm4', 'x86_64', 437)
stateA = {
'host1' : [ ],
'host2' : [ vm3, vm4 ],
'host3' : [ vm1, vm2 ],
}
stateB = {
'host1' : [ vm2 ],
'host2' : [ vm1, vm4 ],
'host3' : [ vm3 ],
}
expected_path = """\
shutdown:
! vm2: host3 -> host1 cost 2542
! vm1: host3 -> host1 cost 892
! vm3: host2 -> host3 cost 3039
! vm1: host1 -> host2 cost 892
provision:
"""
return (stateA, stateB, expected_path)
def case_circles():
"""This one would go around in circles if we allowed moving of the
same VM twice in a row.
"""
testcases.utils.create_vmhosts(3, 'x86_64', 4096, 280)
vm1 = VM('vm1', 'x86_64', 1280)
vm2 = VM('vm2', 'x86_64', 593)
vm3 = VM('vm3', 'x86_64', 1479)
vm4 = VM('vm4', 'x86_64', 2332)
vm5 = VM('vm5', 'x86_64', 2799)
stateA = {
'host1' : [ vm5 ],
'host2' : [ vm4 ],
'host3' : [ vm1, vm2, vm3 ],
}
stateB = {
'host1' : [ vm5 ],
'host2' : [ vm1, vm2 ],
'host3' : [ vm3, vm4 ],
}
expected_path = """\
shutdown:
! vm1: host3 -> host2 cost 1280
! vm2: host3 -> host1 cost 593
! vm4: host2 -> host3 cost 2332
! vm2: host1 -> host2 cost 593
provision:
"""
return (stateA, stateB, expected_path)
def case_slow():
"""This one is sloooooow.
"""
testcases.utils.create_vmhosts(5, 'x86_64', 4096, 280)
vm1 = VM('vm1', 'x86_64', 721)
vm2 = VM('vm2', 'x86_64', 2347)
vm3 = VM('vm3', 'x86_64', 175)
vm4 = VM('vm4', 'x86_64', 329)
vm5 = VM('vm5', 'x86_64', 750)
vm6 = VM('vm6', 'x86_64', 687)
vm7 = VM('vm7', 'x86_64', 2752)
vm8 = VM('vm8', 'x86_64', 3199)
vm9 = VM('vm9', 'x86_64', 3532)
stateA = {
'host1' : [ vm9 ],
'host2' : [ vm8 ],
'host3' : [ vm7 ],
'host4' : [ vm4, vm5, vm6 ],
'host5' : [ vm1, vm2, vm3 ],
}
stateB = {
'host1' : [ vm3, vm9 ],
'host2' : [ vm1, vm4, vm5, vm6 ],
'host3' : [ vm2 ],
'host4' : [ vm7 ],
'host5' : [ vm8 ],
}
expected_path = """\
shutdown:
! vm4: host4 -> host2 cost 329
! vm6: host4 -> host3 cost 687
! vm7: host3 -> host4 cost 2752
! vm2: host5 -> host3 cost 2347
! vm3: host5 -> host1 cost 175
! vm1: host5 -> host3 cost 721
! vm8: host2 -> host5 cost 3199
! vm5: host4 -> host2 cost 750
! vm1: host3 -> host2 cost 721
! vm6: host3 -> host2 cost 687
provision:
"""
return (stateA, stateB, expected_path)
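

# A hedged helper sketch, not used by the cases above: given one of the state
# dictionaries returned by a case_* function (shape {host_name: [VM, ...]}),
# report how many VMs each host carries. Only that dict/list shape is assumed
# here; the VM class internals are left alone.
def describe_state(state):
    for host in sorted(state):
        print('%s: %d VM(s)' % (host, len(state[host])))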
|
|
# Copyright 2008-2011 WebDriver committers
# Copyright 2008-2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The WebDriver implementation.
"""
import base64
from command import Command
from webelement import WebElement
from remote_connection import RemoteConnection
from errorhandler import ErrorHandler
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.alert import Alert
class WebDriver(object):
"""
Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol as defined
here: http://code.google.com/p/selenium/wiki/JsonWireProtocol
:Attributes:
- command_executor - The command.CommandExecutor object used to execute commands.
- error_handler - errorhandler.ErrorHandler object used to verify that the server did not return an error.
- session_id - The session ID to send with every command.
- capabilities - A dictionary of capabilities of the underlying browser for this instance's session.
"""
def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub',
desired_capabilities=None, browser_profile=None):
"""
Create a new driver that will issue commands using the wire protocol.
:Args:
- command_executor - Either a command.CommandExecutor object or a string that specifies the URL of a remote server to send commands to.
- desired_capabilities - Dictionary holding predefined values for starting a browser
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if desired_capabilities is None:
raise WebDriverException("Desired Capabilities can't be None")
if not isinstance(desired_capabilities, dict):
raise WebDriverException("Desired Capabilities must be a dictionary")
self.command_executor = command_executor
if type(self.command_executor) is str or type(self.command_executor) is unicode:
self.command_executor = RemoteConnection(command_executor)
self.session_id = None
self.capabilities = {}
self.error_handler = ErrorHandler()
self.start_client()
self.start_session(desired_capabilities, browser_profile)
@property
def name(self):
"""Returns the name of the underlying browser for this instance.
:Usage:
- driver.name
"""
if 'browserName' in self.capabilities:
return self.capabilities['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""
Called before starting a new session. This method may be overridden
to define custom startup behavior.
"""
pass
def stop_client(self):
"""
Called after executing a quit command. This method may be overridden
to define custom shutdown behavior.
"""
pass
def start_session(self, desired_capabilities, browser_profile=None):
"""
Creates a new session with the desired capabilities.
:Args:
         - desired_capabilities - A dictionary of capabilities to request when starting the browser session.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if browser_profile:
desired_capabilities['firefox_profile'] = browser_profile.encoded
response = self.execute(Command.NEW_SESSION, {
'desiredCapabilities': desired_capabilities,
})
self.session_id = response['sessionId']
self.capabilities = response['value']
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, WebElement):
return {'ELEMENT': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
def create_web_element(self, element_id):
"""
Creates a web element with the specified element_id.
"""
return WebElement(self, element_id)
def _unwrap_value(self, value):
if isinstance(value, dict) and 'ELEMENT' in value:
return self.create_web_element(value['ELEMENT'])
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
def execute(self, driver_command, params=None):
"""
Sends a command to be executed by a command.CommandExecutor.
:Args:
- driver_command: The name of the command to execute as a string.
- params: A dictionary of named parameters to send with the command.
:Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
def get(self, url):
"""
Loads a web page in the current browser session.
"""
self.execute(Command.GET, {'url': url})
@property
def title(self):
"""Returns the title of the current page.
:Usage:
driver.title
"""
resp = self.execute(Command.GET_TITLE)
return resp['value'] if resp['value'] is not None else ""
def find_element_by_id(self, id_):
"""Finds an element by id.
:Args:
- id\_ - The id of the element to be found.
:Usage:
driver.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""
Finds multiple elements by id.
:Args:
- id\_ - The id of the elements to be found.
:Usage:
            driver.find_elements_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:Args:
- xpath - The xpath locator of the element to find.
:Usage:
driver.find_element_by_xpath('//div/td[1]')
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Usage:
driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
"""
Finds an element by link text.
:Args:
- link_text: The text of the element to be found.
:Usage:
driver.find_element_by_link_text('Sign In')
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
"""
Finds elements by link text.
:Args:
         - text: The text of the elements to be found.
:Usage:
driver.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
"""
Finds an element by a partial match of its link text.
:Args:
- link_text: The text of the element to partially match on.
:Usage:
driver.find_element_by_partial_link_text('Sign')
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""
Finds elements by a partial match of their link text.
:Args:
- link_text: The text of the element to partial match on.
:Usage:
            driver.find_elements_by_partial_link_text('Sign')
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
"""
Finds an element by name.
:Args:
- name: The name of the element to find.
:Usage:
driver.find_element_by_name('foo')
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""
Finds elements by name.
:Args:
- name: The name of the elements to find.
:Usage:
driver.find_elements_by_name('foo')
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
"""
Finds an element by tag name.
:Args:
- name: The tag name of the element to find.
:Usage:
driver.find_element_by_tag_name('foo')
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""
Finds elements by tag name.
:Args:
         - name: The tag name to use when finding elements.
:Usage:
driver.find_elements_by_tag_name('foo')
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
"""
Finds an element by class name.
:Args:
- name: The class name of the element to find.
:Usage:
driver.find_element_by_class_name('foo')
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Usage:
driver.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""
Finds an element by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_element_by_css_selector('#foo')
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""
Finds elements by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
            driver.find_elements_by_css_selector('#foo')
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def execute_script(self, script, *args):
"""
        Synchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_script('document.title')
"""
        converted_args = list(args)
return self.execute(Command.EXECUTE_SCRIPT,
{'script': script, 'args':converted_args})['value']
def execute_async_script(self, script, *args):
"""
        Asynchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_async_script('document.title')
"""
        converted_args = list(args)
return self.execute(Command.EXECUTE_ASYNC_SCRIPT,
{'script': script, 'args':converted_args})['value']
@property
def current_url(self):
"""
Gets the URL of the current page.
:Usage:
driver.current_url
"""
return self.execute(Command.GET_CURRENT_URL)['value']
@property
def page_source(self):
"""
Gets the source of the current page.
:Usage:
driver.page_source
"""
return self.execute(Command.GET_PAGE_SOURCE)['value']
def close(self):
"""
Closes the current window.
:Usage:
driver.close()
"""
self.execute(Command.CLOSE)
def quit(self):
"""
Quits the driver and closes every associated window.
:Usage:
driver.quit()
"""
try:
self.execute(Command.QUIT)
finally:
self.stop_client()
@property
def current_window_handle(self):
"""
Returns the handle of the current window.
:Usage:
driver.current_window_handle
"""
return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']
@property
def window_handles(self):
"""
Returns the handles of all windows within the current session.
:Usage:
driver.window_handles
"""
return self.execute(Command.GET_WINDOW_HANDLES)['value']
def maximize_window(self):
"""
Maximizes the current window that webdriver is using
"""
self.execute(Command.MAXIMIZE_WINDOW, {"windowHandle": "current"})
#Target Locators
def switch_to_active_element(self):
"""
Returns the element with focus, or BODY if nothing has focus.
:Usage:
driver.switch_to_active_element()
"""
return self.execute(Command.GET_ACTIVE_ELEMENT)['value']
def switch_to_window(self, window_name):
"""
Switches focus to the specified window.
:Args:
- window_name: The name or window handle of the window to switch to.
:Usage:
driver.switch_to_window('main')
"""
self.execute(Command.SWITCH_TO_WINDOW, {'name': window_name})
def switch_to_frame(self, frame_reference):
"""
Switches focus to the specified frame, by index, name, or webelement.
:Args:
         - frame_reference: The name of the frame to switch to, an integer representing the index,
or a webelement that is an (i)frame to switch to.
:Usage:
driver.switch_to_frame('frame_name')
driver.switch_to_frame(1)
driver.switch_to_frame(driver.find_elements_by_tag_name("iframe")[0])
"""
self.execute(Command.SWITCH_TO_FRAME, {'id': frame_reference})
def switch_to_default_content(self):
"""
Switch focus to the default frame.
:Usage:
driver.switch_to_default_content()
"""
self.execute(Command.SWITCH_TO_FRAME, {'id': None})
def switch_to_alert(self):
"""
Switches focus to an alert on the page.
:Usage:
driver.switch_to_alert()
"""
return Alert(self)
#Navigation
def back(self):
"""
Goes one step backward in the browser history.
:Usage:
driver.back()
"""
self.execute(Command.GO_BACK)
def forward(self):
"""
Goes one step forward in the browser history.
:Usage:
driver.forward()
"""
self.execute(Command.GO_FORWARD)
def refresh(self):
"""
Refreshes the current page.
:Usage:
driver.refresh()
"""
self.execute(Command.REFRESH)
# Options
def get_cookies(self):
"""
Returns a set of dictionaries, corresponding to cookies visible in the current session.
:Usage:
driver.get_cookies()
"""
return self.execute(Command.GET_ALL_COOKIES)['value']
def get_cookie(self, name):
"""
Get a single cookie by name. Returns the cookie if found, None if not.
:Usage:
driver.get_cookie('my_cookie')
"""
cookies = self.get_cookies()
for cookie in cookies:
if cookie['name'] == name:
return cookie
return None
def delete_cookie(self, name):
"""
Deletes a single cookie with the given name.
:Usage:
driver.delete_cookie('my_cookie')
"""
self.execute(Command.DELETE_COOKIE, {'name': name})
def delete_all_cookies(self):
"""
Delete all cookies in the scope of the session.
:Usage:
driver.delete_all_cookies()
"""
self.execute(Command.DELETE_ALL_COOKIES)
def add_cookie(self, cookie_dict):
"""
Adds a cookie to your current session.
:Args:
- cookie_dict: A dictionary object, with required keys - "name" and "value";
optional keys - "path", "domain", "secure", "expiry"
        :Usage:
driver.add_cookie({'name' : 'foo', 'value' : 'bar'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True})
"""
self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
# Timeouts
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
driver.implicitly_wait(30)
"""
self.execute(Command.IMPLICIT_WAIT, {'ms': float(time_to_wait) * 1000})
def set_script_timeout(self, time_to_wait):
"""
Set the amount of time that the script should wait during an
execute_async_script call before throwing an error.
:Args:
- time_to_wait: The amount of time to wait
:Usage:
driver.set_script_timeout(30)
"""
self.execute(Command.SET_SCRIPT_TIMEOUT,
{'ms': float(time_to_wait) * 1000})
def find_element(self, by=By.ID, value=None):
"""
'Private' method used by the find_element_by_* methods.
:Usage:
Use the corresponding find_element_by_* instead of this.
"""
return self.execute(Command.FIND_ELEMENT,
{'using': by, 'value': value})['value']
def find_elements(self, by=By.ID, value=None):
"""
'Private' method used by the find_elements_by_* methods.
:Usage:
Use the corresponding find_elements_by_* instead of this.
"""
return self.execute(Command.FIND_ELEMENTS,
{'using': by, 'value': value})['value']
@property
def desired_capabilities(self):
"""
        Returns the driver's current desired capabilities.
"""
return self.capabilities
def get_screenshot_as_file(self, filename):
"""
Gets the screenshot of the current window. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to.
:Usage:
driver.get_screenshot_as_file('/Screenshots/foo.png')
"""
png = self.execute(Command.SCREENSHOT)['value']
try:
with open(filename, 'wb') as f:
f.write(base64.decodestring(png))
except IOError:
return False
del png
return True
def get_screenshot_as_base64(self):
"""
Gets the screenshot of the current window as a base64 encoded string
        which is useful for embedding images in HTML.
:Usage:
driver.get_screenshot_as_base64()
"""
return self.execute(Command.SCREENSHOT)['value']
def set_window_size(self, width, height, windowHandle='current'):
"""
Sets the width and height of the current window. (window.resizeTo)
:Args:
- width: the width in pixels to set the window to
- height: the height in pixels to set the window to
:Usage:
driver.set_window_size(800,600)
"""
self.execute(Command.SET_WINDOW_SIZE, {'width': width, 'height': height,
'windowHandle': windowHandle})
def get_window_size(self, windowHandle='current'):
"""
Gets the width and height of the current window.
:Usage:
driver.get_window_size()
"""
return self.execute(Command.GET_WINDOW_SIZE,
{'windowHandle': windowHandle})['value']
def set_window_position(self, x, y, windowHandle='current'):
"""
Sets the x,y position of the current window. (window.moveTo)
:Args:
- x: the x-coordinate in pixels to set the window position
- y: the y-coordinate in pixels to set the window position
:Usage:
driver.set_window_position(0,0)
"""
self.execute(Command.SET_WINDOW_POSITION, {'x': x, 'y': y,
'windowHandle': windowHandle})
def get_window_position(self, windowHandle='current'):
"""
Gets the x,y position of the current window.
:Usage:
driver.get_window_position()
"""
return self.execute(Command.GET_WINDOW_POSITION,
{'windowHandle': windowHandle})['value']
@property
def orientation(self):
"""
Gets the current orientation of the device
:Usage:
orientation = driver.orientation
"""
return self.execute(Command.GET_SCREEN_ORIENTATION)['value']
@orientation.setter
def orientation(self, value):
"""
Sets the current orientation of the device
:Args:
- value: orientation to set it to.
:Usage:
driver.orientation = 'landscape'
"""
allowed_values = ['LANDSCAPE', 'PORTRAIT']
if value.upper() in allowed_values:
self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value})['value']
else:
raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'")
|
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import netaddr
import six
import testtools
from tempest.api.network import base_security_groups as sec_base
from tempest.common import custom_matchers
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
class PortsTestJSON(sec_base.BaseSecGroupTest):
"""Test the following operations for ports:
port create
port delete
port list
port show
port update
"""
@classmethod
def resource_setup(cls):
super(PortsTestJSON, cls).resource_setup()
cls.network = cls.create_network()
cls.port = cls.create_port(cls.network)
def _delete_port(self, port_id):
self.ports_client.delete_port(port_id)
body = self.ports_client.list_ports()
ports_list = body['ports']
        self.assertNotIn(port_id, [n['id'] for n in ports_list])
def _create_subnet(self, network, gateway='',
cidr=None, mask_bits=None, **kwargs):
subnet = self.create_subnet(network, gateway, cidr, mask_bits)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.subnets_client.delete_subnet, subnet['id'])
return subnet
def _create_network(self, network_name=None, **kwargs):
network_name = network_name or data_utils.rand_name(
self.__class__.__name__)
network = self.networks_client.create_network(
name=network_name, **kwargs)['network']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.networks_client.delete_network,
network['id'])
return network
@decorators.attr(type='smoke')
@decorators.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
def test_create_update_delete_port(self):
# Verify port creation
body = self.ports_client.create_port(
network_id=self.network['id'],
name=data_utils.rand_name(self.__class__.__name__))
port = body['port']
# Schedule port deletion with verification upon test completion
self.addCleanup(self._delete_port, port['id'])
self.assertTrue(port['admin_state_up'])
# Verify port update
new_name = "New_Port"
body = self.ports_client.update_port(port['id'],
name=new_name,
admin_state_up=False)
updated_port = body['port']
self.assertEqual(updated_port['name'], new_name)
self.assertFalse(updated_port['admin_state_up'])
@decorators.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
def test_create_bulk_port(self):
network1 = self.network
network2 = self._create_network()
network_list = [network1['id'], network2['id']]
port_list = [{'network_id': net_id} for net_id in network_list]
body = self.ports_client.create_bulk_ports(ports=port_list)
created_ports = body['ports']
port1 = created_ports[0]
port2 = created_ports[1]
self.addCleanup(self._delete_port, port1['id'])
self.addCleanup(self._delete_port, port2['id'])
self.assertEqual(port1['network_id'], network1['id'])
self.assertEqual(port2['network_id'], network2['id'])
self.assertTrue(port1['admin_state_up'])
self.assertTrue(port2['admin_state_up'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
def test_create_port_in_allowed_allocation_pools(self):
network = self._create_network()
net_id = network['id']
address = self.cidr
address.prefixlen = self.mask_bits
if ((address.version == 4 and address.prefixlen >= 30) or
(address.version == 6 and address.prefixlen >= 126)):
msg = ("Subnet %s isn't large enough for the test" % address.cidr)
raise exceptions.InvalidConfiguration(msg)
allocation_pools = {'allocation_pools': [{'start': str(address[2]),
'end': str(address[-2])}]}
self._create_subnet(network, cidr=address,
mask_bits=address.prefixlen,
**allocation_pools)
body = self.ports_client.create_port(
network_id=net_id,
name=data_utils.rand_name(self.__class__.__name__))
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, body['port']['id'])
port = body['port']
ip_address = port['fixed_ips'][0]['ip_address']
start_ip_address = allocation_pools['allocation_pools'][0]['start']
end_ip_address = allocation_pools['allocation_pools'][0]['end']
ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
self.assertIn(ip_address, ip_range)
self.ports_client.delete_port(port['id'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
def test_show_port(self):
# Verify the details of port
body = self.ports_client.show_port(self.port['id'])
port = body['port']
self.assertIn('id', port)
# NOTE(rfolco): created_at and updated_at may get inconsistent values
# due to possible delay between POST request and resource creation.
        # TODO(rfolco): Neutron Bug #1365341 is fixed, so the extra_dhcp_opts
        # key can be removed in the O release (K/L gate jobs still need it).
self.assertThat(self.port,
custom_matchers.MatchesDictExceptForKeys
(port, excluded_keys=['extra_dhcp_opts',
'created_at',
'updated_at']))
@decorators.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
def test_show_port_fields(self):
# Verify specific fields of a port
fields = ['id', 'mac_address']
body = self.ports_client.show_port(self.port['id'],
fields=fields)
port = body['port']
self.assertEqual(sorted(port.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(port[field_name], self.port[field_name])
@decorators.attr(type='smoke')
@decorators.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
def test_list_ports(self):
# Verify the port exists in the list of all ports
body = self.ports_client.list_ports()
ports = [port['id'] for port in body['ports']
if port['id'] == self.port['id']]
self.assertNotEmpty(ports, "Created port not found in the list")
@decorators.idempotent_id('e7fe260b-1e79-4dd3-86d9-bec6a7959fc5')
def test_port_list_filter_by_ip(self):
# Create network and subnet
network = self._create_network()
self._create_subnet(network)
# Create two ports
port_1 = self.ports_client.create_port(
network_id=network['id'],
name=data_utils.rand_name(self.__class__.__name__))
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port_1['port']['id'])
port_2 = self.ports_client.create_port(
network_id=network['id'],
name=data_utils.rand_name(self.__class__.__name__))
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port_2['port']['id'])
# List ports filtered by fixed_ips
port_1_fixed_ip = port_1['port']['fixed_ips'][0]['ip_address']
fixed_ips = 'ip_address=' + port_1_fixed_ip
port_list = self.ports_client.list_ports(fixed_ips=fixed_ips)
# Check that we got the desired port
ports = port_list['ports']
tenant_ids = set([port['tenant_id'] for port in ports])
self.assertEqual(len(tenant_ids), 1,
'Ports from multiple tenants are in the list resp')
port_ids = [port['id'] for port in ports]
fixed_ips = [port['fixed_ips'] for port in ports]
port_ips = []
for addr in fixed_ips:
port_ips.extend([port['ip_address'] for port in addr])
port_net_ids = [port['network_id'] for port in ports]
self.assertIn(port_1['port']['id'], port_ids)
self.assertIn(port_1_fixed_ip, port_ips)
self.assertIn(network['id'], port_net_ids)
@decorators.idempotent_id('79895408-85d5-460d-94e7-9531c5fd9123')
@testtools.skipUnless(
utils.is_extension_enabled('ip-substring-filtering', 'network'),
'ip-substring-filtering extension not enabled.')
def test_port_list_filter_by_ip_substr(self):
# Create network and subnet
network = self._create_network()
subnet = self._create_subnet(network)
# Get two IP addresses
ip_address_1 = None
ip_address_2 = None
ip_network = ipaddress.ip_network(six.text_type(subnet['cidr']))
for ip in ip_network:
if ip == ip_network.network_address:
continue
if ip_address_1 is None:
ip_address_1 = six.text_type(ip)
else:
ip_address_2 = ip_address_1
ip_address_1 = six.text_type(ip)
# Make sure these two IP addresses have different substring
if ip_address_1[:-1] != ip_address_2[:-1]:
break
# Create two ports
fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': ip_address_1}]
port_1 = self.ports_client.create_port(
network_id=network['id'],
name=data_utils.rand_name(self.__class__.__name__),
fixed_ips=fixed_ips)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port_1['port']['id'])
fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': ip_address_2}]
port_2 = self.ports_client.create_port(
network_id=network['id'],
name=data_utils.rand_name(self.__class__.__name__),
fixed_ips=fixed_ips)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port_2['port']['id'])
# Scenario 1: List port1 (port2 is filtered out)
if ip_address_1[:-1] != ip_address_2[:-1]:
ips_filter = 'ip_address_substr=' + ip_address_1[:-1]
else:
ips_filter = 'ip_address_substr=' + ip_address_1
ports = self.ports_client.list_ports(fixed_ips=ips_filter)['ports']
# Check that we got the desired port
port_ids = [port['id'] for port in ports]
fixed_ips = [port['fixed_ips'] for port in ports]
port_ips = []
for addr in fixed_ips:
port_ips.extend([a['ip_address'] for a in addr])
port_net_ids = [port['network_id'] for port in ports]
self.assertIn(network['id'], port_net_ids)
self.assertIn(port_1['port']['id'], port_ids)
self.assertIn(port_1['port']['fixed_ips'][0]['ip_address'], port_ips)
self.assertNotIn(port_2['port']['id'], port_ids)
self.assertNotIn(
port_2['port']['fixed_ips'][0]['ip_address'], port_ips)
# Scenario 2: List both port1 and port2
substr = ip_address_1
while substr not in ip_address_2:
substr = substr[:-1]
ips_filter = 'ip_address_substr=' + substr
ports = self.ports_client.list_ports(fixed_ips=ips_filter)['ports']
        # Check that we got both ports
port_ids = [port['id'] for port in ports]
fixed_ips = [port['fixed_ips'] for port in ports]
port_ips = []
for addr in fixed_ips:
port_ips.extend([a['ip_address'] for a in addr])
port_net_ids = [port['network_id'] for port in ports]
self.assertIn(network['id'], port_net_ids)
self.assertIn(port_1['port']['id'], port_ids)
self.assertIn(port_1['port']['fixed_ips'][0]['ip_address'], port_ips)
self.assertIn(port_2['port']['id'], port_ids)
self.assertIn(port_2['port']['fixed_ips'][0]['ip_address'], port_ips)
@decorators.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
def test_port_list_filter_by_router_id(self):
# Create a router
network = self._create_network()
self._create_subnet(network)
router = self.create_router()
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.routers_client.delete_router, router['id'])
port = self.ports_client.create_port(
network_id=network['id'],
name=data_utils.rand_name(self.__class__.__name__))
# Add router interface to port created above
self.routers_client.add_router_interface(router['id'],
port_id=port['port']['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.routers_client.remove_router_interface,
router['id'], port_id=port['port']['id'])
# List ports filtered by router_id
port_list = self.ports_client.list_ports(device_id=router['id'])
ports = port_list['ports']
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]['id'], port['port']['id'])
self.assertEqual(ports[0]['device_id'], router['id'])
@decorators.idempotent_id('ff7f117f-f034-4e0e-abff-ccef05c454b4')
def test_list_ports_fields(self):
# Verify specific fields of ports
fields = ['id', 'mac_address']
body = self.ports_client.list_ports(fields=fields)
ports = body['ports']
self.assertNotEmpty(ports, "Port list returned is empty")
# Asserting the fields returned are correct
for port in ports:
self.assertEqual(sorted(fields), sorted(port.keys()))
@decorators.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
def test_create_update_port_with_second_ip(self):
# Create a network with two subnets
network = self._create_network()
subnet_1 = self._create_subnet(network)
subnet_2 = self._create_subnet(network)
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
fixed_ips = fixed_ip_1 + fixed_ip_2
# Create a port with multiple IP addresses
port = self.create_port(network,
fixed_ips=fixed_ips)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port['id'])
self.assertEqual(2, len(port['fixed_ips']))
check_fixed_ips = [subnet_1['id'], subnet_2['id']]
for item in port['fixed_ips']:
self.assertIn(item['subnet_id'], check_fixed_ips)
# Update the port to return to a single IP address
port = self.update_port(port, fixed_ips=fixed_ip_1)
self.assertEqual(1, len(port['fixed_ips']))
# Update the port with a second IP address from second subnet
port = self.update_port(port, fixed_ips=fixed_ips)
self.assertEqual(2, len(port['fixed_ips']))
def _update_port_with_security_groups(self, security_groups_names):
subnet_1 = self._create_subnet(self.network)
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
security_groups_list = list()
sec_grps_client = self.security_groups_client
for name in security_groups_names:
group_create_body = sec_grps_client.create_security_group(
name=name)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.security_groups_client.delete_security_group,
group_create_body['security_group']['id'])
security_groups_list.append(group_create_body['security_group']
['id'])
# Create a port
sec_grp_name = data_utils.rand_name('secgroup')
security_group = sec_grps_client.create_security_group(
name=sec_grp_name)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.security_groups_client.delete_security_group,
security_group['security_group']['id'])
post_body = {
"name": data_utils.rand_name(self.__class__.__name__),
"security_groups": [security_group['security_group']['id']],
"network_id": self.network['id'],
"admin_state_up": True,
"fixed_ips": fixed_ip_1}
body = self.ports_client.create_port(**post_body)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, body['port']['id'])
port = body['port']
# Update the port with security groups
subnet_2 = self.create_subnet(self.network)
fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
update_body = {
"name": data_utils.rand_name(self.__class__.__name__),
"admin_state_up": False,
"fixed_ips": fixed_ip_2,
"security_groups": security_groups_list}
body = self.ports_client.update_port(port['id'], **update_body)
port_show = body['port']
# Verify the security groups and other attributes updated to port
exclude_keys = set(port_show).symmetric_difference(update_body)
exclude_keys.add('fixed_ips')
exclude_keys.add('security_groups')
self.assertThat(port_show, custom_matchers.MatchesDictExceptForKeys(
update_body, exclude_keys))
self.assertEqual(fixed_ip_2[0]['subnet_id'],
port_show['fixed_ips'][0]['subnet_id'])
for security_group in security_groups_list:
self.assertIn(security_group, port_show['security_groups'])
@decorators.idempotent_id('58091b66-4ff4-4cc1-a549-05d60c7acd1a')
@testtools.skipUnless(
utils.is_extension_enabled('security-group', 'network'),
'security-group extension not enabled.')
def test_update_port_with_security_group_and_extra_attributes(self):
self._update_port_with_security_groups(
[data_utils.rand_name('secgroup')])
@decorators.idempotent_id('edf6766d-3d40-4621-bc6e-2521a44c257d')
@testtools.skipUnless(
utils.is_extension_enabled('security-group', 'network'),
'security-group extension not enabled.')
def test_update_port_with_two_security_groups_and_extra_attributes(self):
self._update_port_with_security_groups(
[data_utils.rand_name('secgroup'),
data_utils.rand_name('secgroup')])
@decorators.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
def test_create_show_delete_port_user_defined_mac(self):
# Create a port for a legal mac
body = self.ports_client.create_port(
network_id=self.network['id'],
name=data_utils.rand_name(self.__class__.__name__))
old_port = body['port']
free_mac_address = old_port['mac_address']
self.ports_client.delete_port(old_port['id'])
# Create a new port with user defined mac
body = self.ports_client.create_port(
network_id=self.network['id'],
mac_address=free_mac_address,
name=data_utils.rand_name(self.__class__.__name__))
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, body['port']['id'])
port = body['port']
body = self.ports_client.show_port(port['id'])
show_port = body['port']
self.assertEqual(free_mac_address,
show_port['mac_address'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('4179dcb9-1382-4ced-84fe-1b91c54f5735')
@testtools.skipUnless(
utils.is_extension_enabled('security-group', 'network'),
'security-group extension not enabled.')
def test_create_port_with_no_securitygroups(self):
network = self._create_network()
self._create_subnet(network)
port = self.create_port(network, security_groups=[])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port['id'])
self.assertIsNotNone(port['security_groups'])
self.assertEmpty(port['security_groups'])
class PortsIpV6TestJSON(PortsTestJSON):
_ip_version = 6
|
|
import xml.etree.ElementTree as ET
import xml.dom.minidom as MD
from .testSuite import TestSuite
from .testCase import TestCase
from .utils import forceUnicode, cleanIllegalXmlChars
class TestReport(object):
class XmlDecodingFailure(Exception):
pass
class MergeFailure(Exception):
pass
def __init__(self, testSuites=None, **kwargs):
self.params = {
'time': None,
'name': None,
'tests': None,
'failures': None,
'errors': None,
'disabled': None,
'testSuites': [],
'timeAggregate': sum,
}
self.attributeNames = [
'time',
'name',
'tests',
'failures',
'errors',
'disabled',
]
if 'timeAggregate' in kwargs and kwargs['timeAggregate'] is not None:
self.params['timeAggregate'] = kwargs['timeAggregate']
if testSuites is not None and not isinstance(testSuites, list):
testSuites = [testSuites]
if testSuites is not None:
self.params['testSuites'] = testSuites
self._recalculateParams()
self.params.update(kwargs)
def toRawData(self):
testReportData = {
'testSuites': [],
}
for testSuite in self.params['testSuites']:
testSuiteData = {
'testCases': [],
}
for testCase in testSuite.params['testCases']:
testSuiteData['testCases'].append(testCase.params)
testSuiteData.update(dict([(k, v) for k, v in testSuite.params.items() if
k in testSuite.attributeNames]))
testReportData['testSuites'].append(testSuiteData)
testReportData.update(dict([(k, v) for k, v in self.params.items() if k in self.attributeNames]))
return testReportData
def toXml(self, prettyPrint=False, encoding=None):
testsuitesAttrib = dict([(key, forceUnicode(val, encoding)) for key, val in self.params.items() if
key in self.attributeNames and
val is not None])
testsuitesNode = ET.Element('testsuites', attrib=testsuitesAttrib)
for testSuite in self.params['testSuites']:
testsuiteAttrib = dict([(key, forceUnicode(val, encoding)) for key, val in testSuite.params.items() if
key in testSuite.attributeNames and
val is not None])
testsuiteNode = ET.SubElement(testsuitesNode, 'testsuite', attrib=testsuiteAttrib)
for testCase in testSuite.params['testCases']:
testcaseAttrib = dict([(key, forceUnicode(val, encoding)) for key, val in testCase.params.items() if
key in testCase.attributeNames and
val is not None])
testcaseNode = ET.SubElement(testsuiteNode, 'testcase', attrib=testcaseAttrib)
for childName in testCase.childNames.keys():
childAttrib = dict([(key.split('_')[1], forceUnicode(val, encoding)) for key, val in
testCase.params.items() if
key.startswith('%s_' % childName) and
val is not None])
if testCase.params[childName] is not None or len(childAttrib.items()) > 0:
childNode = ET.SubElement(testcaseNode, testCase.childNames[childName], attrib=childAttrib)
childNode.text = forceUnicode((testCase.params[childName]), encoding)
uglyXml = ET.tostring(testsuitesNode, encoding=encoding)
uglyXml = uglyXml.decode(encoding or 'utf-8')
uglyXml = cleanIllegalXmlChars(uglyXml)
if prettyPrint:
uglyXml = uglyXml.encode(encoding or 'utf-8')
xml = MD.parseString(uglyXml)
xml = xml.toprettyxml(encoding=encoding)
if encoding:
                xml = xml.decode(encoding)
return xml
return uglyXml
def fromXml(self, xmlStr, encoding=None):
self._clearAttributes()
xmlStr = xmlStr.encode(encoding or 'utf-8')
root = ET.fromstring(xmlStr)
if root.tag != 'testsuites':
raise self.XmlDecodingFailure
self._fillAttributes(root.attrib)
self.params['testSuites'] = []
for child in root:
if child.tag == 'testsuite':
testSuite = TestSuite()
testSuite._fillAttributes(child.attrib)
for subchild in child:
if subchild.tag == 'testcase':
testCase = TestCase()
testCase._fillAttributes(subchild.attrib)
for subsubchild in subchild:
if subsubchild.tag in testCase.childNames.values():
childNamesToParamNames = dict([(v, k) for k, v in testCase.childNames.items()])
paramName = childNamesToParamNames[subsubchild.tag]
testCase.params[paramName] = subsubchild.text
for attributeName, attributeValue in subsubchild.attrib.items():
testCase.params['%s_%s' % (paramName, attributeName)] = attributeValue
testSuite.params['testCases'].append(testCase)
testSuite._recalculateParams()
self.params['testSuites'].append(testSuite)
self._recalculateParams()
def merge(self, testReport, recalculate=True):
testSuiteNames = [ts.params['name'] for ts in self.params['testSuites'] if ts.params['name'] is not None]
testSuitesToAdd = [ts for ts in testReport.params['testSuites'] if ts.params['name'] not in testSuiteNames]
testSuitesToMerge = [ts for ts in testReport.params['testSuites'] if ts.params['name'] in testSuiteNames]
self.params['testSuites'] += testSuitesToAdd
[intTs.merge(extTs, recalculate) for intTs in self.params['testSuites'] for extTs in testSuitesToMerge if
intTs.params['name'] == extTs.params['name']]
if recalculate:
self._recalculateParams()
def __str__(self):
return str(self.params)
def _clearAttributes(self):
for attributeName in self.attributeNames:
self.params[attributeName] = None
def _fillAttributes(self, attributes):
for attributeName in self.attributeNames:
if attributeName in attributes:
self.params[attributeName] = attributes[attributeName]
def _recalculateParams(self):
def anything2int(anything):
try:
return int(anything)
            except (TypeError, ValueError):
return None
def anything2float(anything):
try:
return float(anything)
            except (TypeError, ValueError):
return None
timesInSuites = [anything2float(ts.params['time']) for ts in self.params['testSuites']]
timesInSuites = [time for time in timesInSuites if time is not None]
self.params['time'] = self.params['timeAggregate'](timesInSuites)
testsInSuites = [anything2int(ts.params['tests']) for ts in self.params['testSuites']]
testsInSuites = [tests for tests in testsInSuites if tests is not None]
self.params['tests'] = sum(testsInSuites)
failuresInSuites = [anything2int(ts.params['failures']) for ts in self.params['testSuites']]
failuresInSuites = [failures for failures in failuresInSuites if failures is not None]
self.params['failures'] = sum(failuresInSuites)
errorsInSuites = [anything2int(ts.params['errors']) for ts in self.params['testSuites']]
errorsInSuites = [errors for errors in errorsInSuites if errors is not None]
self.params['errors'] = sum(errorsInSuites)
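

if __name__ == '__main__':
    # Illustrative round-trip sketch, assuming the sibling TestSuite and
    # TestCase classes imported above behave as the parser expects: read a
    # tiny JUnit-style document, let the totals be recalculated, and
    # pretty-print it again.
    sample = (
        '<testsuites>'
        '<testsuite name="suite1" tests="1" failures="0" errors="0" time="0.5">'
        '<testcase name="test_ok" time="0.5"/>'
        '</testsuite>'
        '</testsuites>'
    )
    report = TestReport()
    report.fromXml(sample)
    print(report.toXml(prettyPrint=True))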
|
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all types of tests from one unified interface."""
import argparse
import collections
import logging
import os
import shutil
import signal
import sys
import threading
import unittest
from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import base_error
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper
def AddCommonOptions(parser):
"""Adds all common options to |parser|."""
group = parser.add_argument_group('Common Options')
default_build_type = os.environ.get('BUILDTYPE', 'Debug')
debug_or_release_group = group.add_mutually_exclusive_group()
debug_or_release_group.add_argument(
'--debug', action='store_const', const='Debug', dest='build_type',
default=default_build_type,
help=('If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug.'))
debug_or_release_group.add_argument(
'--release', action='store_const', const='Release', dest='build_type',
help=('If set, run test suites under out/Release. '
'Default is env var BUILDTYPE or Debug.'))
group.add_argument('--build-directory', dest='build_directory',
help=('Path to the directory in which build files are'
' located (should not include build type)'))
group.add_argument('--output-directory', dest='output_directory',
help=('Path to the directory in which build files are'
' located (must include build type). This will take'
' precedence over --debug, --release and'
' --build-directory'))
group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
help=('Number of retries for a test before '
'giving up (default: %(default)s).'))
group.add_argument('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
group.add_argument('--flakiness-dashboard-server',
dest='flakiness_dashboard_server',
help=('Address of the server that is hosting the '
'Chrome for Android flakiness dashboard.'))
group.add_argument('--enable-platform-mode', action='store_true',
help=('Run the test scripts in platform mode, which '
'conceptually separates the test runner from the '
'"device" (local or remote, real or emulated) on '
'which the tests are running. [experimental]'))
group.add_argument('-e', '--environment', default='local',
choices=constants.VALID_ENVIRONMENTS,
help='Test environment to run in (default: %(default)s).')
group.add_argument('--adb-path',
help=('Specify the absolute path of the adb binary that '
'should be used.'))
group.add_argument('--json-results-file', dest='json_results_file',
help='If set, will dump results in JSON form '
'to specified file.')
def ProcessCommonOptions(args):
"""Processes and handles all common options."""
run_tests_helper.SetLogLevel(args.verbose_count)
constants.SetBuildType(args.build_type)
if args.build_directory:
constants.SetBuildDirectory(args.build_directory)
if args.output_directory:
constants.SetOutputDirectort(args.output_directory)
if args.adb_path:
constants.SetAdbPath(args.adb_path)
# Some things such as Forwarder require ADB to be in the environment path.
adb_dir = os.path.dirname(constants.GetAdbPath())
if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
def AddRemoteDeviceOptions(parser):
group = parser.add_argument_group('Remote Device Options')
group.add_argument('--trigger',
help=('Only triggers the test if set. Stores test_run_id '
'in given file path. '))
group.add_argument('--collect',
help=('Only collects the test results if set. '
'Gets test_run_id from given file path.'))
group.add_argument('--remote-device', action='append',
help='Device type to run test on.')
group.add_argument('--results-path',
help='File path to download results to.')
group.add_argument('--api-protocol',
help='HTTP protocol to use. (http or https)')
group.add_argument('--api-address',
help='Address to send HTTP requests.')
group.add_argument('--api-port',
help='Port to send HTTP requests to.')
group.add_argument('--runner-type',
help='Type of test to run as.')
group.add_argument('--runner-package',
help='Package name of test.')
group.add_argument('--device-type',
choices=constants.VALID_DEVICE_TYPES,
help=('Type of device to run on. iOS or android'))
group.add_argument('--device-oem', action='append',
help='Device OEM to run on.')
group.add_argument('--remote-device-file',
help=('File with JSON to select remote device. '
'Overrides all other flags.'))
group.add_argument('--remote-device-timeout', type=int,
help='Times to retry finding remote device')
device_os_group = group.add_mutually_exclusive_group()
device_os_group.add_argument('--remote-device-minimum-os',
help='Minimum OS on device.')
device_os_group.add_argument('--remote-device-os', action='append',
help='OS to have on the device.')
api_secret_group = group.add_mutually_exclusive_group()
api_secret_group.add_argument('--api-secret', default='',
help='API secret for remote devices.')
api_secret_group.add_argument('--api-secret-file', default='',
help='Path to file that contains API secret.')
api_key_group = group.add_mutually_exclusive_group()
api_key_group.add_argument('--api-key', default='',
help='API key for remote devices.')
api_key_group.add_argument('--api-key-file', default='',
help='Path to file that contains API key.')
def AddDeviceOptions(parser):
"""Adds device options to |parser|."""
group = parser.add_argument_group(title='Device Options')
group.add_argument('-c', dest='cleanup_test_files',
help='Cleanup test files on the device after run',
action='store_true')
group.add_argument('--tool',
dest='tool',
help=('Run the test under a tool '
'(use --tool help to list them)'))
group.add_argument('-d', '--device', dest='test_device',
help=('Target device for the test suite '
'to run on.'))
def AddGTestOptions(parser):
"""Adds gtest options to |parser|."""
gtest_suites = list(gtest_config.STABLE_TEST_SUITES
+ gtest_config.EXPERIMENTAL_TEST_SUITES)
group = parser.add_argument_group('GTest Options')
group.add_argument('-s', '--suite', dest='suite_name',
nargs='+', metavar='SUITE_NAME', required=True,
help=('Executable name of the test suite to run. '
'Available suites include (but are not limited to): '
'%s' % ', '.join('"%s"' % s for s in gtest_suites)))
group.add_argument('--gtest_also_run_disabled_tests',
'--gtest-also-run-disabled-tests',
dest='run_disabled', action='store_true',
help='Also run disabled tests if applicable.')
group.add_argument('-a', '--test-arguments', dest='test_arguments',
default='',
help='Additional arguments to pass to the test.')
group.add_argument('-t', dest='timeout', type=int, default=60,
help='Timeout to wait for each test '
'(default: %(default)s).')
group.add_argument('--isolate_file_path',
'--isolate-file-path',
dest='isolate_file_path',
help='.isolate file path to override the default '
'path')
filter_group = group.add_mutually_exclusive_group()
filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
dest='test_filter',
help='googletest-style filter string.')
filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
help='Path to file that contains googletest-style '
'filter strings. (Lines will be joined with '
'":" to create a single filter string.)')
AddDeviceOptions(parser)
AddCommonOptions(parser)
AddRemoteDeviceOptions(parser)
def AddLinkerTestOptions(parser):
group = parser.add_argument_group('Linker Test Options')
group.add_argument('-f', '--gtest-filter', dest='test_filter',
help='googletest-style filter string.')
AddCommonOptions(parser)
AddDeviceOptions(parser)
def AddJavaTestOptions(argument_group):
"""Adds the Java test options to |option_parser|."""
argument_group.add_argument(
'-f', '--test-filter', dest='test_filter',
help=('Test filter (if not fully qualified, will run all matches).'))
argument_group.add_argument(
'-A', '--annotation', dest='annotation_str',
help=('Comma-separated list of annotations. Run only tests with any of '
'the given annotations. An annotation can be either a key or a '
'key-values pair. A test that has no annotation is considered '
'"SmallTest".'))
argument_group.add_argument(
'-E', '--exclude-annotation', dest='exclude_annotation_str',
help=('Comma-separated list of annotations. Exclude tests with these '
'annotations.'))
argument_group.add_argument(
'--screenshot', dest='screenshot_failures', action='store_true',
help='Capture screenshots of test failures')
argument_group.add_argument(
'--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
argument_group.add_argument(
'--official-build', action='store_true', help='Run official build tests.')
argument_group.add_argument(
'--test_data', '--test-data', action='append', default=[],
help=('Each instance defines a directory of test data that should be '
'copied to the target(s) before running the tests. The argument '
'should be of the form <target>:<source>, <target> is relative to '
'the device data directory, and <source> is relative to the '
'chromium build directory.'))
argument_group.add_argument(
'--disable-dalvik-asserts', dest='set_asserts', action='store_false',
default=True, help='Removes the dalvik.vm.enableassertions property')
def ProcessJavaTestOptions(args):
"""Processes options/arguments and populates |options| with defaults."""
# TODO(jbudorick): Handle most of this function in argparse.
if args.annotation_str:
args.annotations = args.annotation_str.split(',')
elif args.test_filter:
args.annotations = []
else:
args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
'EnormousTest', 'IntegrationTest']
if args.exclude_annotation_str:
args.exclude_annotations = args.exclude_annotation_str.split(',')
else:
args.exclude_annotations = []
def AddInstrumentationTestOptions(parser):
"""Adds Instrumentation test options to |parser|."""
parser.usage = '%(prog)s [options]'
group = parser.add_argument_group('Instrumentation Test Options')
AddJavaTestOptions(group)
java_or_python_group = group.add_mutually_exclusive_group()
java_or_python_group.add_argument(
'-j', '--java-only', action='store_false',
dest='run_python_tests', default=True, help='Run only the Java tests.')
java_or_python_group.add_argument(
'-p', '--python-only', action='store_false',
dest='run_java_tests', default=True,
help='Run only the host-driven tests.')
group.add_argument('--host-driven-root',
help='Root of the host-driven tests.')
group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true',
help='Wait for debugger.')
group.add_argument('--apk-under-test', dest='apk_under_test',
help=('the name of the apk under test.'))
group.add_argument('--test-apk', dest='test_apk', required=True,
help=('The name of the apk containing the tests '
'(without the .apk extension; '
'e.g. "ContentShellTest").'))
group.add_argument('--coverage-dir',
help=('Directory in which to place all generated '
'EMMA coverage files.'))
group.add_argument('--device-flags', dest='device_flags', default='',
help='The relative filepath to a file containing '
'command-line flags to set on the device')
group.add_argument('--device-flags-file', default='',
help='The relative filepath to a file containing '
'command-line flags to set on the device')
group.add_argument('--isolate_file_path',
'--isolate-file-path',
dest='isolate_file_path',
help='.isolate file path to override the default '
'path')
AddCommonOptions(parser)
AddDeviceOptions(parser)
AddRemoteDeviceOptions(parser)
def ProcessInstrumentationOptions(args):
"""Processes options/arguments and populate |options| with defaults.
Args:
args: argparse.Namespace object.
Returns:
An InstrumentationOptions named tuple which contains all options relevant to
instrumentation tests.
"""
ProcessJavaTestOptions(args)
if not args.host_driven_root:
args.run_python_tests = False
args.test_apk_path = os.path.join(
constants.GetOutDirectory(),
constants.SDK_BUILD_APKS_DIR,
'%s.apk' % args.test_apk)
args.test_apk_jar_path = os.path.join(
constants.GetOutDirectory(),
constants.SDK_BUILD_TEST_JAVALIB_DIR,
'%s.jar' % args.test_apk)
args.test_support_apk_path = '%sSupport%s' % (
os.path.splitext(args.test_apk_path))
args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)
# TODO(jbudorick): Get rid of InstrumentationOptions.
return instrumentation_test_options.InstrumentationOptions(
args.tool,
args.cleanup_test_files,
args.annotations,
args.exclude_annotations,
args.test_filter,
args.test_data,
args.save_perf_json,
args.screenshot_failures,
args.wait_for_debugger,
args.coverage_dir,
args.test_apk,
args.test_apk_path,
args.test_apk_jar_path,
args.test_runner,
args.test_support_apk_path,
args.device_flags,
args.isolate_file_path,
args.set_asserts
)
def AddUIAutomatorTestOptions(parser):
"""Adds UI Automator test options to |parser|."""
group = parser.add_argument_group('UIAutomator Test Options')
AddJavaTestOptions(group)
group.add_argument(
'--package', required=True, choices=constants.PACKAGE_INFO.keys(),
metavar='PACKAGE', help='Package under test.')
group.add_argument(
'--test-jar', dest='test_jar', required=True,
help=('The name of the dexed jar containing the tests (without the '
'.dex.jar extension). Alternatively, this can be a full path '
'to the jar.'))
AddCommonOptions(parser)
AddDeviceOptions(parser)
def ProcessUIAutomatorOptions(args):
"""Processes UIAutomator options/arguments.
Args:
args: argparse.Namespace object.
Returns:
A UIAutomatorOptions named tuple which contains all options relevant to
uiautomator tests.
"""
ProcessJavaTestOptions(args)
if os.path.exists(args.test_jar):
# The dexed JAR is fully qualified, assume the info JAR lives along side.
args.uiautomator_jar = args.test_jar
else:
args.uiautomator_jar = os.path.join(
constants.GetOutDirectory(),
constants.SDK_BUILD_JAVALIB_DIR,
'%s.dex.jar' % args.test_jar)
args.uiautomator_info_jar = (
args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
'_java.jar')
return uiautomator_test_options.UIAutomatorOptions(
args.tool,
args.cleanup_test_files,
args.annotations,
args.exclude_annotations,
args.test_filter,
args.test_data,
args.save_perf_json,
args.screenshot_failures,
args.uiautomator_jar,
args.uiautomator_info_jar,
args.package,
args.set_asserts)
def AddJUnitTestOptions(parser):
"""Adds junit test options to |parser|."""
group = parser.add_argument_group('JUnit Test Options')
group.add_argument(
'-s', '--test-suite', dest='test_suite', required=True,
help=('JUnit test suite to run.'))
group.add_argument(
'-f', '--test-filter', dest='test_filter',
help='Filters tests googletest-style.')
group.add_argument(
'--package-filter', dest='package_filter',
help='Filters tests by package.')
group.add_argument(
'--runner-filter', dest='runner_filter',
help='Filters tests by runner class. Must be fully qualified.')
group.add_argument(
'--sdk-version', dest='sdk_version', type=int,
help='The Android SDK version.')
AddCommonOptions(parser)
def AddMonkeyTestOptions(parser):
"""Adds monkey test options to |parser|."""
group = parser.add_argument_group('Monkey Test Options')
group.add_argument(
'--package', required=True, choices=constants.PACKAGE_INFO.keys(),
metavar='PACKAGE', help='Package under test.')
group.add_argument(
'--event-count', default=10000, type=int,
help='Number of events to generate (default: %(default)s).')
group.add_argument(
'--category', default='',
help='A list of allowed categories.')
group.add_argument(
'--throttle', default=100, type=int,
help='Delay between events (ms) (default: %(default)s). ')
group.add_argument(
'--seed', type=int,
help=('Seed value for pseudo-random generator. Same seed value generates '
'the same sequence of events. Seed is randomized by default.'))
group.add_argument(
'--extra-args', default='',
help=('String of other args to pass to the command verbatim.'))
AddCommonOptions(parser)
AddDeviceOptions(parser)
def ProcessMonkeyTestOptions(args):
"""Processes all monkey test options.
Args:
args: argparse.Namespace object.
Returns:
A MonkeyOptions named tuple which contains all options relevant to
monkey tests.
"""
# TODO(jbudorick): Handle this directly in argparse with nargs='+'
category = args.category
if category:
category = args.category.split(',')
# TODO(jbudorick): Get rid of MonkeyOptions.
return monkey_test_options.MonkeyOptions(
args.verbose_count,
args.package,
args.event_count,
category,
args.throttle,
args.seed,
args.extra_args)
def AddUirobotTestOptions(parser):
"""Adds uirobot test options to |option_parser|."""
group = parser.add_argument_group('Uirobot Test Options')
group.add_argument('--app-under-test', required=True,
help='APK to run tests on.')
group.add_argument(
'--minutes', default=5, type=int,
help='Number of minutes to run uirobot test [default: %(default)s].')
AddCommonOptions(parser)
AddDeviceOptions(parser)
AddRemoteDeviceOptions(parser)
def AddPerfTestOptions(parser):
"""Adds perf test options to |parser|."""
group = parser.add_argument_group('Perf Test Options')
class SingleStepAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values and not namespace.single_step:
parser.error('single step command provided, '
'but --single-step not specified.')
elif namespace.single_step and not values:
parser.error('--single-step specified, '
'but no single step command provided.')
setattr(namespace, self.dest, values)
step_group = group.add_mutually_exclusive_group(required=True)
# TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
# This requires removing "--" from client calls.
step_group.add_argument(
'--single-step', action='store_true',
help='Execute the given command with retries, but only print the result '
'for the "most successful" round.')
step_group.add_argument(
'--steps',
help='JSON file containing the list of commands to run.')
step_group.add_argument(
'--print-step',
help='The name of a previously executed perf step to print.')
group.add_argument(
'--output-json-list',
help='Write a simple list of names from --steps into the given file.')
group.add_argument(
'--collect-chartjson-data',
action='store_true',
help='Cache the chartjson output from each step for later use.')
group.add_argument(
'--output-chartjson-data',
default='',
help='Write out chartjson into the given file.')
group.add_argument(
'--flaky-steps',
help=('A JSON file containing steps that are flaky '
            'and whose exit codes will be ignored.'))
group.add_argument(
'--no-timeout', action='store_true',
help=('Do not impose a timeout. Each perf step is responsible for '
'implementing the timeout logic.'))
group.add_argument(
'-f', '--test-filter',
help=('Test filter (will match against the names listed in --steps).'))
group.add_argument(
'--dry-run', action='store_true',
help='Just print the steps without executing.')
group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
help='If --single-step is specified, the command to run.')
AddCommonOptions(parser)
AddDeviceOptions(parser)
def ProcessPerfTestOptions(args):
"""Processes all perf test options.
Args:
args: argparse.Namespace object.
Returns:
A PerfOptions named tuple which contains all options relevant to
perf tests.
"""
# TODO(jbudorick): Move single_step handling down into the perf tests.
if args.single_step:
args.single_step = ' '.join(args.single_step_command)
# TODO(jbudorick): Get rid of PerfOptions.
return perf_test_options.PerfOptions(
args.steps, args.flaky_steps, args.output_json_list,
args.print_step, args.no_timeout, args.test_filter,
args.dry_run, args.single_step, args.collect_chartjson_data,
args.output_chartjson_data)
def AddPythonTestOptions(parser):
group = parser.add_argument_group('Python Test Options')
group.add_argument(
'-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
help='Name of the test suite to run.')
AddCommonOptions(parser)
def _RunGTests(args, devices):
"""Subcommand of RunTestsCommands which runs gtests."""
exit_code = 0
for suite_name in args.suite_name:
# TODO(jbudorick): Either deprecate multi-suite or move its handling down
# into the gtest code.
gtest_options = gtest_test_options.GTestOptions(
args.tool,
args.cleanup_test_files,
args.test_filter,
args.run_disabled,
args.test_arguments,
args.timeout,
args.isolate_file_path,
suite_name)
runner_factory, tests = gtest_setup.Setup(gtest_options, devices)
results, test_exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=args.num_retries)
if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
exit_code = test_exit_code
report_results.LogFull(
results=results,
test_type='Unit test',
test_package=suite_name,
flakiness_server=args.flakiness_dashboard_server)
if args.json_results_file:
json_results.GenerateJsonResultsFile(results, args.json_results_file)
if os.path.isdir(constants.ISOLATE_DEPS_DIR):
shutil.rmtree(constants.ISOLATE_DEPS_DIR)
return exit_code
def _RunLinkerTests(args, devices):
"""Subcommand of RunTestsCommands which runs linker tests."""
runner_factory, tests = linker_setup.Setup(args, devices)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=60,
num_retries=args.num_retries)
report_results.LogFull(
results=results,
test_type='Linker test',
test_package='ChromiumLinkerTest')
if args.json_results_file:
json_results.GenerateJsonResultsFile(results, args.json_results_file)
return exit_code
def _RunInstrumentationTests(args, devices):
"""Subcommand of RunTestsCommands which runs instrumentation tests."""
logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))
instrumentation_options = ProcessInstrumentationOptions(args)
if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger cannot be sharded; using first available device.')
devices = devices[:1]
results = base_test_result.TestRunResults()
exit_code = 0
if args.run_java_tests:
runner_factory, tests = instrumentation_setup.Setup(
instrumentation_options, devices)
test_results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=args.num_retries)
results.AddTestRunResults(test_results)
if args.run_python_tests:
runner_factory, tests = host_driven_setup.InstrumentationSetup(
args.host_driven_root, args.official_build,
instrumentation_options)
if tests:
test_results, test_exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=args.num_retries)
results.AddTestRunResults(test_results)
# Only allow exit code escalation
if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
exit_code = test_exit_code
if args.device_flags:
args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
args.device_flags)
report_results.LogFull(
results=results,
test_type='Instrumentation',
test_package=os.path.basename(args.test_apk),
annotation=args.annotations,
flakiness_server=args.flakiness_dashboard_server)
if args.json_results_file:
json_results.GenerateJsonResultsFile(results, args.json_results_file)
return exit_code
def _RunUIAutomatorTests(args, devices):
"""Subcommand of RunTestsCommands which runs uiautomator tests."""
uiautomator_options = ProcessUIAutomatorOptions(args)
runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=args.num_retries)
report_results.LogFull(
results=results,
test_type='UIAutomator',
test_package=os.path.basename(args.test_jar),
annotation=args.annotations,
flakiness_server=args.flakiness_dashboard_server)
if args.json_results_file:
json_results.GenerateJsonResultsFile(results, args.json_results_file)
return exit_code
def _RunJUnitTests(args):
"""Subcommand of RunTestsCommand which runs junit tests."""
runner_factory, tests = junit_setup.Setup(args)
_, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
return exit_code
def _RunMonkeyTests(args, devices):
"""Subcommand of RunTestsCommands which runs monkey tests."""
monkey_options = ProcessMonkeyTestOptions(args)
runner_factory, tests = monkey_setup.Setup(monkey_options)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=False, test_timeout=None,
num_retries=args.num_retries)
report_results.LogFull(
results=results,
test_type='Monkey',
test_package='Monkey')
if args.json_results_file:
json_results.GenerateJsonResultsFile(results, args.json_results_file)
return exit_code
def _RunPerfTests(args):
"""Subcommand of RunTestsCommands which runs perf tests."""
perf_options = ProcessPerfTestOptions(args)
# Just save a simple json with a list of test names.
if perf_options.output_json_list:
return perf_test_runner.OutputJsonList(
perf_options.steps, perf_options.output_json_list)
# Just print the results from a single previously executed step.
if perf_options.print_step:
return perf_test_runner.PrintTestOutput(
perf_options.print_step, perf_options.output_chartjson_data)
runner_factory, tests, devices = perf_setup.Setup(perf_options)
  # shard=False means that each device will get the full list of tests
  # and then each one will decide its own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but has no affinity.
results, _ = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=False, test_timeout=None,
num_retries=args.num_retries)
report_results.LogFull(
results=results,
test_type='Perf',
test_package='Perf')
if args.json_results_file:
json_results.GenerateJsonResultsFile(results, args.json_results_file)
if perf_options.single_step:
return perf_test_runner.PrintTestOutput('single_step')
perf_test_runner.PrintSummary(tests)
# Always return 0 on the sharding stage. Individual tests exit_code
# will be returned on the print_step stage.
return 0
def _RunPythonTests(args):
"""Subcommand of RunTestsCommand which runs python unit tests."""
suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
suite_path = suite_vars['path']
suite_test_modules = suite_vars['test_modules']
sys.path = [suite_path] + sys.path
try:
suite = unittest.TestSuite()
suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
for m in suite_test_modules)
runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
return 0 if runner.run(suite).wasSuccessful() else 1
finally:
sys.path = sys.path[1:]
def _GetAttachedDevices(test_device=None):
"""Get all attached devices.
Args:
test_device: Name of a specific device to use.
Returns:
A list of attached devices.
"""
attached_devices = []
attached_devices = android_commands.GetAttachedDevices()
if test_device:
assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
% (test_device, ', '.join(attached_devices)))
attached_devices = [test_device]
assert attached_devices, 'No devices attached.'
return sorted(attached_devices)
def RunTestsCommand(args, parser):
"""Checks test type and dispatches to the appropriate function.
Args:
args: argparse.Namespace object.
parser: argparse.ArgumentParser object.
Returns:
    Integer indicating the exit code.
Raises:
Exception: Unknown command name passed in, or an exception from an
individual test runner.
"""
command = args.command
ProcessCommonOptions(args)
if args.enable_platform_mode:
return RunTestsInPlatformMode(args, parser)
if command in constants.LOCAL_MACHINE_TESTS:
devices = []
else:
devices = _GetAttachedDevices(args.test_device)
forwarder.Forwarder.RemoveHostLog()
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
if command == 'gtest':
return _RunGTests(args, devices)
elif command == 'linker':
return _RunLinkerTests(args, devices)
elif command == 'instrumentation':
return _RunInstrumentationTests(args, devices)
elif command == 'uiautomator':
return _RunUIAutomatorTests(args, devices)
elif command == 'junit':
return _RunJUnitTests(args)
elif command == 'monkey':
return _RunMonkeyTests(args, devices)
elif command == 'perf':
return _RunPerfTests(args)
elif command == 'python':
return _RunPythonTests(args)
else:
raise Exception('Unknown test type.')
_SUPPORTED_IN_PLATFORM_MODE = [
# TODO(jbudorick): Add support for more test types.
'gtest',
'instrumentation',
'uirobot',
]
def RunTestsInPlatformMode(args, parser):
if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
parser.error('%s is not yet supported in platform mode' % args.command)
with environment_factory.CreateEnvironment(args, parser.error) as env:
with test_instance_factory.CreateTestInstance(args, parser.error) as test:
with test_run_factory.CreateTestRun(
args, env, test, parser.error) as test_run:
results = test_run.RunTests()
if args.environment == 'remote_device' and args.trigger:
return 0 # Not returning results, only triggering.
report_results.LogFull(
results=results,
test_type=test.TestType(),
test_package=test_run.TestPackage(),
annotation=getattr(args, 'annotations', None),
flakiness_server=getattr(args, 'flakiness_dashboard_server', None))
if args.json_results_file:
json_results.GenerateJsonResultsFile(
results, args.json_results_file)
return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE
CommandConfigTuple = collections.namedtuple(
'CommandConfigTuple',
['add_options_func', 'help_txt'])
VALID_COMMANDS = {
'gtest': CommandConfigTuple(
AddGTestOptions,
'googletest-based C++ tests'),
'instrumentation': CommandConfigTuple(
AddInstrumentationTestOptions,
'InstrumentationTestCase-based Java tests'),
'uiautomator': CommandConfigTuple(
AddUIAutomatorTestOptions,
"Tests that run via Android's uiautomator command"),
'junit': CommandConfigTuple(
AddJUnitTestOptions,
'JUnit4-based Java tests'),
'monkey': CommandConfigTuple(
AddMonkeyTestOptions,
"Tests based on Android's monkey"),
'perf': CommandConfigTuple(
AddPerfTestOptions,
'Performance tests'),
'python': CommandConfigTuple(
AddPythonTestOptions,
'Python tests based on unittest.TestCase'),
'linker': CommandConfigTuple(
AddLinkerTestOptions,
'Linker tests'),
'uirobot': CommandConfigTuple(
AddUirobotTestOptions,
'Uirobot test'),
}
def DumpThreadStacks(_signal, _frame):
for thread in threading.enumerate():
reraiser_thread.LogThreadStack(thread)
def main():
signal.signal(signal.SIGUSR1, DumpThreadStacks)
parser = argparse.ArgumentParser()
command_parsers = parser.add_subparsers(title='test types',
dest='command')
for test_type, config in sorted(VALID_COMMANDS.iteritems(),
key=lambda x: x[0]):
subparser = command_parsers.add_parser(
test_type, usage='%(prog)s [options]', help=config.help_txt)
config.add_options_func(subparser)
args = parser.parse_args()
try:
return RunTestsCommand(args, parser)
except base_error.BaseError as e:
logging.exception('Error occurred.')
if e.is_infra_error:
return constants.INFRA_EXIT_CODE
else:
return constants.ERROR_EXIT_CODE
except: # pylint: disable=W0702
logging.exception('Unrecognized error occurred.')
return constants.ERROR_EXIT_CODE
if __name__ == '__main__':
sys.exit(main())
|
|
"""
Models for actual Polling Stations and Polling Districts!
"""
from itertools import groupby
import re
import urllib.parse
from django.contrib.gis.db import models
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from councils.models import Council
class PollingDistrict(models.Model):
name = models.CharField(blank=True, null=True, max_length=255)
council = models.ForeignKey(Council, null=True)
internal_council_id = models.CharField(blank=True, max_length=100)
extra_id = models.CharField(blank=True, null=True, max_length=100)
area = models.MultiPolygonField(null=True, blank=True)
# This is NOT a FK, as we might not have the polling station at
# the point of import
polling_station_id = models.CharField(blank=True, max_length=255)
class Meta:
unique_together = (("council", "internal_council_id"))
objects = models.GeoManager()
    def __str__(self):
name = self.name or "Unnamed"
return "%s (%s)" % (name, self.council)
class PollingStationManager(models.GeoManager):
def get_polling_station(self, council_id,
location=None, polling_district=None):
assert any((polling_district, location))
if not polling_district:
try:
polling_district = PollingDistrict.objects.get(
area__covers=location)
except PollingDistrict.DoesNotExist:
return None
if polling_district.internal_council_id:
# always attempt to look up district id in stations table
station = self.filter(
polling_district_id=polling_district.internal_council_id,
council_id=council_id
)
if len(station) == 1:
return station[0]
else:
addresses = set([s.address for s in station])
if len(addresses) == 1:
return station[0]
if polling_district.polling_station_id:
# only try to look up station id if it is a sensible value
station = self.get_polling_station_by_id(
internal_council_id=polling_district.polling_station_id,
council_id=council_id
)
# if polling_station_id is set and we don't get a station back
# or it maps to more than one station due to dodgy data
# do not fall back and attempt point within polygon lookup
return station
else:
# only try a point within polygon lookup
# if polling_station_id is not set
station = self.filter(
location__within=polling_district.area, council_id=council_id)
if len(station) == 1:
return station[0]
else:
# make this explicit rather than implied
return None
def get_polling_station_by_id(self, internal_council_id, council_id):
station = self.filter(
internal_council_id=internal_council_id,
council_id=council_id
)
if len(station) == 1:
return station[0]
else:
return None
class PollingStation(models.Model):
council = models.ForeignKey(Council, null=True, db_index=True)
internal_council_id = models.CharField(
blank=True, max_length=100, db_index=True)
postcode = models.CharField(blank=True, null=True, max_length=100)
address = models.TextField(blank=True, null=True)
location = models.PointField(null=True, blank=True)
# This is NOT a FK, as we might not have the polling district at
# the point of import
polling_district_id = models.CharField(blank=True, max_length=255)
class Meta:
unique_together = (("council", "internal_council_id"))
index_together = [
["council", "internal_council_id"],
["council", "polling_district_id"]
]
objects = PollingStationManager()
def __str__(self):
return "{0} ({1})".format(self.internal_council_id, self.council)
@property
def formatted_address(self):
if not self.address:
return None
return "\n".join([x[0] for x in groupby(self.address.split(','))])
class ResidentialAddress(models.Model):
address = models.TextField(blank=True, null=True)
postcode = models.CharField(blank=True, null=True, max_length=100, db_index=True)
council = models.ForeignKey(Council, null=True)
polling_station_id = models.CharField(blank=True, max_length=100)
slug = models.SlugField(blank=False, null=False, db_index=True, unique=True, max_length=255)
def save(self, *args, **kwargs):
"""
strip all whitespace from postcode and convert to uppercase
this will make it easier to query based on user-supplied postcode
"""
self.postcode = re.sub('[^A-Z0-9]', '', self.postcode.upper())
super().save(*args, **kwargs)
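# Illustrative sketch (not part of the original models): the same postcode
# normalisation that ResidentialAddress.save() applies, pulled out as a small
# helper so the intended effect on user-supplied input is easy to see.
def _example_normalise_postcode(postcode):
    """Hypothetical helper mirroring save()'s cleanup: ' bt5 7tq ' -> 'BT57TQ'."""
    return re.sub('[^A-Z0-9]', '', postcode.upper())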
class CustomFinderManager(models.Manager):
def get_custom_finder(self, gss_codes, postcode):
try:
finder = self.get(pk__in=gss_codes)
finder.message = _(finder.message)
"""
            EONI's polling station finder requires postcode to have a space :(
http://www.eoni.org.uk/Offices/Postcode-Search-Results?postcode=BT5+7TQ
will produce a result, whereas
http://www.eoni.org.uk/Offices/Postcode-Search-Results?postcode=BT57TQ
will not.
We might need to take a more sophisticated approach as we add more custom finders
            that accept postcodes (e.g. a postcode format flag in the database).
At the moment I only have this one to work with.
"""
finder.encoded_postcode = urllib.parse.quote(
"%s %s" % (postcode[:(len(postcode)-3)], postcode[-3:])
)
return finder
except ObjectDoesNotExist:
return None
class CustomFinder(models.Model):
"""
Store details of areas that have their own
custom polling station finders
and/or a message that we might want to show.
Example content:
record = CustomFinder(
        area_code='E07000082',
        base_url='https://stroud.maps.arcgis.com/apps/webappviewer/index.html?id=ea6bf4b3655542c1a05c8d7e87d32bb1',
        can_pass_postcode=False,
message="Stroud District Council has its own polling station finder:"
)
record.save()
record = CustomFinder(
        area_code='W06000008',
        base_url='',
        can_pass_postcode=False,
        message="<h2>We're working on it!</h2>Ceredigion Council have provided polling station data. It will be available soon."
)
record.save()
record = CustomFinder(
        area_code='N07000001',
        base_url='http://www.eoni.org.uk/Offices/Postcode-Search-Results?postcode=',
        can_pass_postcode=True,
message='The Electoral Office of Northern Ireland has its own polling station finder:'
)
record.save()
"""
area_code = models.CharField(max_length=9, primary_key=True,
help_text="The GSS code for this area")
base_url = models.CharField(blank=True, max_length=255,
help_text="The landing page for the polling station finder")
can_pass_postcode = models.BooleanField(default=False,
help_text="Does the URL have '?postcode=' in it?")
message = models.TextField(blank=True,
default="This council has its own polling station finder:")
objects = CustomFinderManager()
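# Hedged sketch (illustrative, not part of the original module): the postcode
# re-spacing and URL-encoding that get_custom_finder() performs for finders such
# as EONI's, assuming the postcode is already normalised to the compact 'BT57TQ'
# form used elsewhere in these models.
def _example_encode_postcode_for_finder(postcode):
    """Hypothetical helper: 'BT57TQ' -> 'BT5%207TQ' (space re-inserted, then quoted)."""
    return urllib.parse.quote("%s %s" % (postcode[:-3], postcode[-3:]))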
|
|
# python-automata, the Python DFA library
# License: New BSD License
# Author: Andrew Badr
# Version: July 17, 2008
# Contact: [email protected]
# Code contributions are welcome.
from copy import copy
from .UnionFind import UnionFind
# TODO: general code cleanup
# TODO: write tests
class DFA:
"""This class represents a deterministic finite automaton."""
def __init__(self, states, alphabet, delta, start, accepts):
"""The inputs to the class are as follows:
- states: An iterable containing the states of the DFA. States must be immutable.
- alphabet: An iterable containing the symbols in the DFA's alphabet. Symbols must be immutable.
- delta: A complete function from [states]x[alphabets]->[states].
- start: The state at which the DFA begins operation.
- accepts: A list containing the "accepting" or "final" states of the DFA.
Making delta a function rather than a transition table makes it much easier to define certain DFAs.
If you want to use a transition table, you can just do this:
delta = lambda q,c: transition_table[q][c]
One caveat is that the function should not depend on the value of 'states' or 'accepts', since
these may be modified during minimization.
Finally, the names of states and inputs should be hashable. This generally means strings, numbers,
or tuples of hashables.
"""
self.states = set(states)
self.start = start
self.delta = delta
self.accepts = set(accepts)
self.alphabet = set(alphabet)
self.current_state = start
#
# Administrative functions:
#
def pretty_print(self):
"""Displays all information about the DFA in an easy-to-read way. Not
actually that easy to read if it has too many states.
"""
print("")
print("This DFA has %s states" % len(self.states))
print("States:", self.states)
print("Alphabet:", self.alphabet)
print("Starting state:", self.start)
print("Accepting states:", self.accepts)
print("Transition function:")
print("\t","\t".join(map(str, sorted(self.states))))
for c in self.alphabet:
results = [self.delta(x, c) for x in sorted(self.states)]
print(c, "->\t", "\t".join(map(str, results)))
print("Current state:", self.current_state)
print("Currently accepting:", self.status())
print("")
def validate(self):
"""Checks that:
(1) The accepting-state set is a subset of the state set.
(2) The start-state is a member of the state set.
(3) The current-state is a member of the state set.
(4) Every transition returns a member of the state set.
"""
assert set(self.accepts).issubset(set(self.states))
assert self.start in self.states
assert self.current_state in self.states
for state in self.states:
for char in self.alphabet:
assert self.delta(state, char) in self.states
def copy(self):
"""Returns a copy of the DFA. No data is shared with the original."""
return DFA(self.states, self.alphabet, self.delta, self.start, self.accepts)
#
# Simulating execution:
#
def input(self, char):
"""Updates the DFA's current state based on a single character of input."""
self.current_state = self.delta(self.current_state, char)
def input_sequence(self, char_sequence):
"""Updates the DFA's current state based on an iterable of inputs."""
for char in char_sequence:
self.input(char)
def status(self):
"""Indicates whether the DFA's current state is accepting."""
return (self.current_state in self.accepts)
def reset(self):
"""Returns the DFA to the starting state."""
self.current_state = self.start
def recognizes(self, char_sequence):
"""Indicates whether the DFA accepts a given string."""
state_save = self.current_state
self.reset()
self.input_sequence(char_sequence)
valid = self.status()
self.current_state = state_save
return valid
#
# Minimization methods and their helper functions
#
def state_hash(self, value):
"""Creates a hash with one key for every state in the DFA, and
all values initialized to the 'value' passed.
"""
d = {}
for state in self.states:
if callable(value):
d[state] = value()
else:
d[state] = value
return d
def state_merge(self, q1, q2):
"""Merges q1 into q2. All transitions to q1 are moved to q2.
If q1 was the start or current state, those are also moved to q2.
"""
self.states.remove(q1)
if q1 in self.accepts:
self.accepts.remove(q1)
if self.current_state == q1:
self.current_state = q2
if self.start == q1:
self.start = q2
transitions = {}
for state in self.states: #without q1
transitions[state] = {}
for char in self.alphabet:
next = self.delta(state, char)
if next == q1:
next = q2
transitions[state][char] = next
self.delta = (lambda s, c: transitions[s][c])
def reachable_from(self, q0, inclusive=True):
"""Returns the set of states reachable from given state q0. The optional
parameter "inclusive" indicates that q0 should always be included.
"""
reached = self.state_hash(False)
if inclusive:
reached[q0] = True
to_process = [q0]
while len(to_process):
q = to_process.pop()
for c in self.alphabet:
next = self.delta(q, c)
if reached[next] == False:
reached[next] = True
to_process.append(next)
return [q for q in self.states if reached[q]]
def reachable(self):
"""Returns the reachable subset of the DFA's states."""
return self.reachable_from(self.start)
def delete_unreachable(self):
"""Deletes all the unreachable states."""
reachable = self.reachable()
self.states = reachable
new_accepts = []
for q in self.accepts:
if q in self.states:
new_accepts.append(q)
self.accepts = new_accepts
def mn_classes(self):
"""Returns a partition of self.states into Myhill-Nerode equivalence classes."""
changed = True
classes = []
if self.accepts != []:
classes.append(self.accepts)
nonaccepts = [x for x in self.states if x not in self.accepts]
if nonaccepts != []:
classes.append(nonaccepts)
while changed:
changed = False
for cl in classes:
local_change = False
for alpha in self.alphabet:
next_class = None
new_class = []
for state in cl:
next = self.delta(state, alpha)
if next_class == None:
for c in classes:
if next in c:
next_class = c
elif next not in next_class:
new_class.append(state)
changed = True
local_change = True
if local_change == True:
old_class = []
for c in cl:
if c not in new_class:
old_class.append(c)
classes.remove(cl)
classes.append(old_class)
classes.append(new_class)
break
return classes
def collapse(self, partition):
"""Given a partition of the DFA's states into equivalence classes,
collapses every equivalence class into a single "representative" state.
Returns the hash mapping each old state to its new representative.
"""
new_states = []
new_start = None
new_delta = None
new_accepts = []
#alphabet stays the same
new_current_state = None
state_map = {}
#build new_states, new_start, new_current_state:
for state_class in partition:
representative = state_class[0]
new_states.append(representative)
for state in state_class:
state_map[state] = representative
if state == self.start:
new_start = representative
if state == self.current_state:
new_current_state = representative
#build new_accepts:
for acc in self.accepts:
if acc in new_states:
new_accepts.append(acc)
#build new_delta:
transitions = {}
for state in new_states:
transitions[state] = {}
for alpha in self.alphabet:
transitions[state][alpha] = state_map[self.delta(state, alpha)]
new_delta = (lambda s, a: transitions[s][a])
self.states = new_states
self.start = new_start
self.delta = new_delta
self.accepts = new_accepts
self.current_state = new_current_state
return state_map
def minimize(self):
"""Classical DFA minimization, using the simple O(n^2) algorithm.
Side effect: can mix up the internal ordering of states.
"""
#Step 1: Delete unreachable states
self.delete_unreachable()
#Step 2: Partition the states into equivalence classes
classes = self.mn_classes()
#Step 3: Construct the new DFA
self.collapse(classes)
def preamble_and_kernel(self):
"""Returns the partition of the state-set into the preamble and
kernel as a 2-tuple. A state is in the preamble iff there
are finitely many strings that reach it from the start state.
See "The DFAs of Finitely Different Regular Languages" for context.
"""
#O(n^2): can this be improved?
reachable = {}
for q in self.states:
reachable[q] = self.reachable_from(q, inclusive=False)
in_fin = self.state_hash(True)
for q in reachable[self.start]:
if q in reachable[q]:
for next in reachable[q]:
in_fin[next] = False
preamble = [x for x in self.states if in_fin[x]]
kernel = [x for x in self.states if not in_fin[x]]
return (preamble, kernel)
def pluck_leaves(self):
"""Only for minimized automata. Returns a topologically ordered list of
all the states that induce a finite language. Runs in linear time.
"""
#Step 1: Build the states' profiles
loops = self.state_hash(0)
inbound = self.state_hash(list)
outbound = self.state_hash(list)
for state in self.states:
for c in self.alphabet:
next = self.delta(state, c)
inbound[next].append(state)
outbound[state].append(next)
if state == next:
loops[state] += 1
#Step 2: Add sink state to to_pluck
to_pluck = []
for state in self.states:
if len(outbound[state]) == loops[state]:
if not state in self.accepts:
#prints("Adding '%s' to be plucked" % state)
to_pluck.append(state)
#Step 3: Pluck!
plucked = []
while len(to_pluck):
state = to_pluck.pop()
#prints("Plucking %s" % state)
plucked.append(state)
for incoming in inbound[state]:
#prints("Deleting %s->%s edge" % (incoming, state))
outbound[incoming].remove(state)
if (len(outbound[incoming]) == 0) and (incoming != state):
to_pluck.append(incoming)
#prints("Adding '%s' to be plucked" % incoming)
plucked.reverse()
return plucked
def right_finite_states(self, sink_states):
"""Given a DFA (self) and a list of states (sink_states) that are assumed to induce the
empty language, return the topologically-ordered set of states in the DFA that induce
finite languages.
"""
#Step 1: Build the states' profiles
inbound = self.state_hash(list)
outbound = self.state_hash(list)
for state in self.states:
if state in sink_states:
continue
for c in self.alphabet:
next = self.delta(state, c)
inbound[next].append(state)
outbound[state].append(next)
#Step 2: Pluck!
to_pluck = sink_states
plucked = []
while len(to_pluck):
state = to_pluck.pop()
plucked.append(state)
for incoming in inbound[state]:
outbound[incoming].remove(state)
if (len(outbound[incoming]) == 0) and (incoming != state):
to_pluck.append(incoming)
plucked.reverse()
return plucked
def is_finite(self):
"""Indicates whether the DFA's language is a finite set."""
D2 = self.copy()
D2.minimize()
plucked = D2.pluck_leaves()
return (D2.start in plucked)
def states_fd_equivalent(self, q1, q2):
"""Indicates whether q1 and q2 only have finitely many distinguishing strings."""
d1 = DFA(states=self.states, start=q1, accepts=self.accepts, delta=self.delta, alphabet=self.alphabet)
d2 = DFA(states=self.states, start=q2, accepts=self.accepts, delta=self.delta, alphabet=self.alphabet)
sd_dfa = symmetric_difference(d1, d2)
return sd_dfa.is_finite()
def f_equivalence_classes(self):
"""Returns a partition of the states into finite-difference equivalence clases, using
the experimental O(n^2) algorithm."""
sd = symmetric_difference(self, self)
self_pairs = [(x, x) for x in self.states]
fd_equiv_pairs = sd.right_finite_states(self_pairs)
sets = UnionFind()
for state in self.states:
sets.make_set(state)
for (state1, state2) in fd_equiv_pairs:
set1, set2 = sets.find(state1), sets.find(state2)
if set1 != set2:
sets.union(set1, set2)
state_classes = sets.as_lists()
return state_classes
def hyper_minimize(self):
"""Alters the DFA into a smallest possible DFA recognizing a finitely different language.
In other words, if D is the original DFA and D' the result of this function, then the
symmetric difference of L(D) and L(D') will be a finite set, and there exists no smaller
automaton than D' with this property.
See "The DFAs of Finitely Different Regular Languages" for context.
"""
# Step 1: Classical minimization
self.minimize()
# Step 2: Partition states into equivalence classes
state_classes = self.f_equivalence_classes()
# Step 3: Find preamble and kernel parts
(preamble, kernel) = self.preamble_and_kernel()
# Step 4: Merge (f_merge_states in the paper)
# (Could be done more efficiently)
for sc in state_classes:
pres = [s for s in sc if s in preamble]
kers = [s for s in sc if s in kernel]
if len(kers):
rep = kers[0]
for p_state in pres:
self.state_merge(p_state, rep)
else:
rep = pres[0]
for p_state in pres[1:]:
self.state_merge(p_state, rep)
def levels(self):
"""Returns a dictionary mapping each state to its distance from the starting state."""
levels = {}
seen = [self.start]
levels[self.start] = 0
level_number = 0
level_states = [self.start]
while len(level_states):
next_level_states = []
next_level_number = level_number + 1
for q in level_states:
for c in self.alphabet:
next = self.delta(q, c)
if next not in seen:
seen.append(next)
levels[next] = next_level_number
next_level_states.append(next)
level_states = next_level_states
level_number = next_level_number
return levels
def longest_word_length(self):
"""Given a DFA recognizing a finite language, returns the length of the
longest word in that language, or None if the language is empty.
Assumes the input is minimized.
"""
assert(self.is_finite())
        def long_path(q, length, longest):
            # 'longest' may be None while no accepting state has been seen;
            # guard the comparisons so this also works on Python 3, where
            # comparing an int with None raises TypeError.
            if q in self.accepts:
                if longest is None or length > longest:
                    longest = length
            for char in self.alphabet:
                next = self.delta(q, char)
                if next != q:
                    candidate = long_path(next, length+1, longest)
                    if candidate is not None and (longest is None or candidate > longest):
                        longest = candidate
            return longest
return long_path(self.start, 0, None)
def DFCA_minimize(self, l=None):
"""DFCA minimization"
Input: "self" is a DFA accepting a finite language
Result: "self" is DFCA-minimized, and the returned value is the length of the longest
word accepted by the original DFA
See "Minimal cover-automata for finite languages" for context on DFCAs, and
"An O(n^2) Algorithm for Constructing Minimal Cover Automata for Finite Languages"
for the source of this algorithm (Campeanu, Paun, Santean, and Yu). We follow their
algorithm exactly, except that "l" is optionally calculated for you, and the state-
ordering is automatically created.
There exists a faster, O(n*logn)-time algorithm due to Korner, from CIAA 2002.
"""
assert(self.is_finite())
self.minimize()
###Step 0: Numbering the states and computing "l"
n = len(self.states) - 1
state_order = self.pluck_leaves()
        if l is None:
l = self.longest_word_length()
#We're giving each state a numerical name so that the algorithm can
# run on an "ordered" DFA -- see the paper for why. These functions
# allow us to copiously convert between names.
def nn(q): # "numerical name"
return state_order.index(q)
def rn(n): # "real name"
return state_order[n]
###Step 1: Computing the gap function
# 1.1 -- Step numbering is from the paper
level = self.levels() #holds real names
gap = {} #holds numerical names
# 1.2
for i in range(n):
gap[(i, n)] = l
if level[rn(n)] <= l:
for q in self.accepts:
gap[(nn(q), n)] = 0
# 1.3
for i in range(n-1):
for j in range(i+1, n):
if (rn(i) in self.accepts)^(rn(j) in self.accepts):
gap[(i,j)] = 0
else:
gap[(i,j)] = l
# 1.4
def level_range(i, j):
return l - max(level[rn(i)], level[rn(j)])
for i in range(n-2, -1, -1):
for j in range(n, i, -1):
for char in self.alphabet:
i2 = nn(self.delta(rn(i), char))
j2 = nn(self.delta(rn(j), char))
if i2 != j2:
if i2 < j2:
g = gap[(i2, j2)]
else:
g = gap[(j2, i2)]
if g+1 <= level_range(i, j):
gap[(i,j)] = min(gap[(i,j)], g+1)
###Step 2: Merging states
# 2.1
P = {}
for i in range(n+1):
P[i] = False
# 2.2
for i in range(n):
if P[i] == False:
for j in range(i+1, n+1):
if (P[j] == False) and (gap[(i,j)] == l):
self.state_merge(rn(j), rn(i))
P[j] = True
return l
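# Minimal construction sketch (illustrative, not part of the original library),
# mirroring the __init__ docstring's transition-table suggestion: a DFA over
# {'0', '1'} that accepts exactly the strings containing an even number of '1's.
def _example_even_ones_dfa():
    table = {'even': {'0': 'even', '1': 'odd'},
             'odd': {'0': 'odd', '1': 'even'}}
    d = DFA(states=['even', 'odd'], alphabet=['0', '1'],
            delta=lambda q, c: table[q][c], start='even', accepts=['even'])
    assert d.recognizes('0110')
    assert not d.recognizes('010')
    return d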
#
# Boolean set operations on languages -- end of the DFA class
#
def cross_product(D1, D2, accept_method):
"""A generalized cross-product constructor over two DFAs.
The third argument is a binary boolean function f; a state (q1, q2) in the final
DFA accepts if f(A[q1],A[q2]), where A indicates the acceptance-value of the state.
"""
assert(D1.alphabet == D2.alphabet)
states = []
for s1 in D1.states:
for s2 in D2.states:
states.append((s1,s2))
start = (D1.start, D2.start)
def delta(state_pair, char):
next_D1 = D1.delta(state_pair[0], char)
next_D2 = D2.delta(state_pair[1], char)
return (next_D1, next_D2)
alphabet = copy(D1.alphabet)
accepts = []
for (s1, s2) in states:
a1 = s1 in D1.accepts
a2 = s2 in D2.accepts
if accept_method(a1, a2):
accepts.append((s1, s2))
return DFA(states=states, start=start, delta=delta, accepts=accepts, alphabet=alphabet)
def intersection(D1, D2):
"""Constructs an unminimized DFA recognizing the intersection of the languages of two given DFAs."""
f = bool.__and__
return cross_product(D1, D2, f)
def union(D1, D2):
"""Constructs an unminimized DFA recognizing the union of the languages of two given DFAs."""
f = bool.__or__
return cross_product(D1, D2, f)
def symmetric_difference(D1, D2):
"""Constructs an unminimized DFA recognizing the symmetric difference of the languages of two given DFAs."""
f = bool.__xor__
return cross_product(D1, D2, f)
def inverse(D):
"""Constructs an unminimized DFA recognizing the inverse of the language of a given DFA."""
new_accepts = []
for state in D.states:
if state not in D.accepts:
new_accepts.append(state)
return DFA(states=D.states, start=D.start, delta=D.delta, accepts=new_accepts, alphabet=D.alphabet)
#
# Constructing new DFAs
#
def from_word_list(language, alphabet):
"""Constructs an unminimized DFA accepting the given finite language."""
accepts = language
start = ''
sink = 'sink'
states = [start, sink]
for word in language:
for i in range(len(word)):
prefix = word[:i+1]
if prefix not in states:
states.append(prefix)
fwl = copy(states)
def delta(q, c):
next = q+c
if next in fwl:
return next
else:
return sink
return DFA(states=states, alphabet=alphabet, delta=delta, start=start, accepts=accepts)
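# Brief usage sketch (illustrative only): a DFA for a two-word finite language.
# from_word_list assumes the alphabet covers every symbol used in the words.
def _example_from_word_list():
    d = from_word_list(['ab', 'b'], alphabet=['a', 'b'])
    assert d.recognizes('ab')
    assert d.recognizes('b')
    assert not d.recognizes('a')
    return d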
def modular_zero(n, base=2):
"""Returns a DFA that accepts all binary numbers equal to 0 mod n. Use the optional
parameter "base" if you want something other than binary. The empty string is also
included in the DFA's language.
"""
states = list(range(n))
alphabet = list(map(str, list(range(base))))
delta = lambda q, c: ((q*base+int(c)) % n)
start = 0
accepts = [0]
return DFA(states=states, alphabet=alphabet, delta=delta, start=start, accepts=accepts)
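# Quick sketch (illustrative): modular_zero(3) accepts binary strings whose value
# is divisible by 3, as described in the docstring above.
def _example_modular_zero():
    d = modular_zero(3)
    assert d.recognizes('110')      # 6 % 3 == 0
    assert not d.recognizes('111')  # 7 % 3 == 1
    assert d.recognizes('')         # the empty string is in the language
    return d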
def random(states_size, alphabet_size, acceptance=0.5):
"""Constructs a random DFA with "states_size" states and "alphabet_size" inputs. Each
transition destination is chosen uniformly at random, so the resultant DFA may have
unreachable states. The optional "acceptance" parameter indicates what fraction of
the states should be accepting.
"""
import random
states = list(range(states_size))
start = 0
alphabet = list(range(alphabet_size))
accepts = random.sample(states, int(acceptance*states_size))
tt = {}
for q in states:
tt[q] = {}
for c in alphabet:
tt[q][c] = random.choice(states)
delta = lambda q, c: tt[q][c]
return DFA(states, alphabet, delta, start, accepts)
#
# Finite-factoring
#
def finite_factor(self):
D1 = self.copy()
D1.minimize()
D2 = D1.copy()
D2.hyper_minimize()
D3 = symmetric_difference(D1, D2)
l = D3.DFCA_minimize()
return (D2, (D3, l))
|
|
# -*- coding: utf-8 -*-
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
from __future__ import unicode_literals
import re
import sys
from collections import namedtuple
from functools import wraps
import warnings
"""
Python 3 Stuff
=============================================================================
"""
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
string_type = str
text_type = str
int2str = chr
iterrange = range
else: # pragma: no cover
string_type = basestring # noqa
text_type = unicode # noqa
int2str = unichr # noqa
iterrange = xrange # noqa
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
BLOCK_LEVEL_ELEMENTS = [
# Elements which are invalid to wrap in a `<p>` tag.
# See http://w3c.github.io/html/grouping-content.html#the-p-element
'address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl',
'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3',
'h4', 'h5', 'h6', 'header', 'hr', 'main', 'menu', 'nav', 'ol', 'p', 'pre',
'section', 'table', 'ul',
# Other elements which Markdown should not be mucking up the contents of.
'canvas', 'dd', 'dt', 'group', 'iframe', 'li', 'math', 'noscript', 'output',
'progress', 'script', 'style', 'tbody', 'td', 'th', 'thead', 'tr', 'video'
]
# Placeholders
STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
AMP_SUBSTITUTE = STX+"amp"+ETX
HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
RTL_BIDI_RANGES = (
('\u0590', '\u07FF'),
# Hebrew (0590-05FF), Arabic (0600-06FF),
# Syriac (0700-074F), Arabic supplement (0750-077F),
# Thaana (0780-07BF), Nko (07C0-07FF).
('\u2D30', '\u2D7F') # Tifinagh
)
# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
# markdown.util import etree`). Do not import it by yourself.
try: # pragma: no cover
# Is the C implementation of ElementTree available?
import xml.etree.cElementTree as etree
from xml.etree.ElementTree import Comment
# Serializers (including ours) test with non-c Comment
etree.test_comment = Comment
if etree.VERSION < "1.0.5":
raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
except (ImportError, RuntimeError): # pragma: no cover
# Use the Python implementation of ElementTree?
import xml.etree.ElementTree as etree
if etree.VERSION < "1.1":
raise RuntimeError("ElementTree version 1.1 or higher is required")
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def deprecated(message):
"""
Raise a DeprecationWarning when wrapped function/method is called.
Borrowed from https://stackoverflow.com/a/48632082/866026
"""
def deprecated_decorator(func):
@wraps(func)
def deprecated_func(*args, **kwargs):
warnings.warn(
"'{}' is deprecated. {}".format(func.__name__, message),
category=DeprecationWarning,
stacklevel=2
)
return func(*args, **kwargs)
return deprecated_func
return deprecated_decorator
@deprecated("Use 'Markdown.is_block_level' instead.")
def isBlockLevel(tag):
"""Check if the tag is a block level HTML tag."""
if isinstance(tag, string_type):
return tag.lower().rstrip('/') in BLOCK_LEVEL_ELEMENTS
# Some ElementTree tags are not strings, so return False.
return False
def parseBoolValue(value, fail_on_errors=True, preserve_none=False):
"""Parses a string representing bool value. If parsing was successful,
returns True or False. If preserve_none=True, returns True, False,
or None. If parsing was not successful, raises ValueError, or, if
fail_on_errors=False, returns None."""
if not isinstance(value, string_type):
if preserve_none and value is None:
return value
return bool(value)
elif preserve_none and value.lower() == 'none':
return None
elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
return True
elif value.lower() in ('false', 'no', 'n', 'off', '0', 'none'):
return False
elif fail_on_errors:
raise ValueError('Cannot parse bool value: %r' % value)
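# Brief usage sketch (illustrative, not part of Python-Markdown): the accepted
# spellings and the preserve_none / fail_on_errors behaviour documented above.
def _example_parse_bool_values():
    assert parseBoolValue('yes') is True
    assert parseBoolValue('off') is False
    assert parseBoolValue('none', preserve_none=True) is None
    assert parseBoolValue('bogus', fail_on_errors=False) is None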
def code_escape(text):
"""Escape code."""
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(text_type):
"""A string which should not be further processed."""
pass
class Processor(object):
def __init__(self, md=None):
self.md = md
@property
@deprecated("Use 'md' instead.")
def markdown(self):
# TODO: remove this later
return self.md
class HtmlStash(object):
"""
This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders.
"""
def __init__(self):
""" Create a HtmlStash. """
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks = []
self.tag_counter = 0
self.tag_data = [] # list of dictionaries in the order tags appear
def store(self, html):
"""
Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
Keyword arguments:
* html: an html segment
        Returns: a placeholder string
"""
self.rawHtmlBlocks.append(html)
placeholder = self.get_placeholder(self.html_counter)
self.html_counter += 1
return placeholder
def reset(self):
self.html_counter = 0
self.rawHtmlBlocks = []
def get_placeholder(self, key):
return HTML_PLACEHOLDER % key
def store_tag(self, tag, attrs, left_index, right_index):
"""Store tag data and return a placeholder."""
self.tag_data.append({'tag': tag, 'attrs': attrs,
'left_index': left_index,
'right_index': right_index})
placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
self.tag_counter += 1 # equal to the tag's index in self.tag_data
return placeholder
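# Short sketch (illustrative, not part of Python-Markdown): stashing a raw HTML
# block and getting back the placeholder described in store()'s docstring.
def _example_htmlstash_roundtrip():
    stash = HtmlStash()
    placeholder = stash.store('<div>raw</div>')
    assert placeholder == HTML_PLACEHOLDER % 0
    assert stash.rawHtmlBlocks[0] == '<div>raw</div>'
    return stash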
# Used internally by `Registry` for each item in its sorted list.
# Provides an easier to read API when editing the code later.
# For example, `item.name` is more clear than `item[0]`.
_PriorityItem = namedtuple('PriorityItem', ['name', 'priority'])
class Registry(object):
"""
A priority sorted registry.
A `Registry` instance provides two public methods to alter the data of the
registry: `register` and `deregister`. Use `register` to add items and
`deregister` to remove items. See each method for specifics.
When registering an item, a "name" and a "priority" must be provided. All
items are automatically sorted by "priority" from highest to lowest. The
"name" is used to remove ("deregister") and get items.
    A `Registry` instance is like a list (which maintains order) when reading
data. You may iterate over the items, get an item and get a count (length)
of all items. You may also check that the registry contains an item.
When getting an item you may use either the index of the item or the
string-based "name". For example:
registry = Registry()
registry.register(SomeItem(), 'itemname', 20)
# Get the item by index
item = registry[0]
# Get the item by name
item = registry['itemname']
When checking that the registry contains an item, you may use either the
string-based "name", or a reference to the actual item. For example:
someitem = SomeItem()
registry.register(someitem, 'itemname', 20)
# Contains the name
assert 'itemname' in registry
# Contains the item instance
assert someitem in registry
The method `get_index_for_name` is also available to obtain the index of
an item using that item's assigned "name".
"""
def __init__(self):
self._data = {}
self._priority = []
self._is_sorted = False
def __contains__(self, item):
if isinstance(item, string_type):
# Check if an item exists by this name.
return item in self._data.keys()
# Check if this instance exists.
return item in self._data.values()
def __iter__(self):
self._sort()
return iter([self._data[k] for k, p in self._priority])
def __getitem__(self, key):
self._sort()
if isinstance(key, slice):
data = Registry()
for k, p in self._priority[key]:
data.register(self._data[k], k, p)
return data
if isinstance(key, int):
return self._data[self._priority[key].name]
return self._data[key]
def __len__(self):
return len(self._priority)
def __repr__(self):
return '<{0}({1})>'.format(self.__class__.__name__, list(self))
def get_index_for_name(self, name):
"""
Return the index of the given name.
"""
if name in self:
self._sort()
return self._priority.index(
[x for x in self._priority if x.name == name][0]
)
raise ValueError('No item named "{0}" exists.'.format(name))
def register(self, item, name, priority):
"""
Add an item to the registry with the given name and priority.
Parameters:
* `item`: The item being registered.
* `name`: A string used to reference the item.
* `priority`: An integer or float used to sort against all items.
If an item is registered with a "name" which already exists, the
existing item is replaced with the new item. Tread carefully as the
old item is lost with no way to recover it. The new item will be
sorted according to its priority and will **not** retain the position
of the old item.
"""
if name in self:
# Remove existing item of same name first
self.deregister(name)
self._is_sorted = False
self._data[name] = item
self._priority.append(_PriorityItem(name, priority))
def deregister(self, name, strict=True):
"""
Remove an item from the registry.
Set `strict=False` to fail silently.
"""
try:
index = self.get_index_for_name(name)
del self._priority[index]
del self._data[name]
except ValueError:
if strict:
raise
def _sort(self):
"""
Sort the registry by priority from highest to lowest.
This method is called internally and should never be explicitly called.
"""
if not self._is_sorted:
self._priority.sort(key=lambda item: item.priority, reverse=True)
self._is_sorted = True
# Deprecated Methods which provide a smooth transition from OrderedDict
def __setitem__(self, key, value):
""" Register item with priorty 5 less than lowest existing priority. """
if isinstance(key, string_type):
warnings.warn(
'Using setitem to register a processor or pattern is deprecated. '
'Use the `register` method instead.', DeprecationWarning
)
if key in self:
# Key already exists, replace without altering priority
self._data[key] = value
return
if len(self) == 0:
# This is the first item. Set priority to 50.
priority = 50
else:
self._sort()
priority = self._priority[-1].priority - 5
self.register(value, key, priority)
else:
raise TypeError
def __delitem__(self, key):
""" Deregister an item by name. """
if key in self:
self.deregister(key)
warnings.warn(
'Using del to remove a processor or pattern is deprecated. '
'Use the `deregister` method instead.', DeprecationWarning
)
else:
raise TypeError
def add(self, key, value, location):
""" Register a key by location. """
if len(self) == 0:
# This is the first item. Set priority to 50.
priority = 50
elif location == '_begin':
self._sort()
# Set priority 5 greater than highest existing priority
priority = self._priority[0].priority + 5
elif location == '_end':
self._sort()
# Set priority 5 less than lowest existing priority
priority = self._priority[-1].priority - 5
elif location.startswith('<') or location.startswith('>'):
# Set priority halfway between existing priorities.
i = self.get_index_for_name(location[1:])
if location.startswith('<'):
after = self._priority[i].priority
if i > 0:
before = self._priority[i-1].priority
else:
                    # Location is the first item
before = after + 10
else:
# location.startswith('>')
before = self._priority[i].priority
if i < len(self) - 1:
after = self._priority[i+1].priority
else:
# location is last item
after = before - 10
priority = before - ((before - after) / 2)
else:
raise ValueError('Not a valid location: "%s". Location key '
'must start with a ">" or "<".' % location)
self.register(value, key, priority)
warnings.warn(
'Using the add method to register a processor or pattern is deprecated. '
'Use the `register` method instead.', DeprecationWarning
)
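# Hedged usage sketch (illustrative, not part of Python-Markdown): the priority
# ordering and name-based lookup described in the Registry docstring, using plain
# strings as stand-in items.
def _example_registry_order():
    reg = Registry()
    reg.register('low priority item', 'low', 20)
    reg.register('high priority item', 'high', 30)
    assert list(reg) == ['high priority item', 'low priority item']
    assert reg['low'] == 'low priority item'
    assert 'high' in reg
    return reg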
|
|
import math
import pygame
import os
#### Goal Statement ####
## This program is intended to test a very simple model of gravitic attraction.
## The way it works is as follows:
## Every update, each object with gravity would tell each other object with gravity to adjust its velocity by a tiny amount in the direction of that object.
## Then all the objects would move according to their velocity values.
## - Objects touching each other should not move (( yet ))
## - Use the fp-int split between movement-calculation locations and pixel locations from Arinoid for movement calculations
## - You may also want to preempt rounding errors by using the max_speed stuff from Asteroids, since tiny numbers from the inverse square law might create numerical instabilities
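## A tiny, hypothetical sketch of the acceleration math described above, kept here purely
## as illustration and never called by the game. It assumes a gravitational constant of 1
## and uses a plain unit vector toward the other object, which is slightly different from
## the sine/cosine normalization used further down in
## calculate_gravity_and_adjust_velocities_on_all_gravity_wells().
def _inverse_square_pull_sketch(dx, dy, other_mass):
    ''' Return (dvx, dvy): the per-frame velocity nudge toward an attractor offset by (dx, dy). '''
    distance_squared = (dx ** 2) + (dy ** 2)
    if distance_squared == 0:
        ## Identical positions: no well-defined direction, so no pull.
        return 0.0, 0.0
    distance = math.sqrt(distance_squared)
    ## Inverse square law: the pull falls off with the square of the distance.
    acceleration = other_mass / distance_squared
    ## Split the acceleration into x and y components along the unit vector toward the attractor.
    return (dx / distance) * acceleration, (dy / distance) * acceleration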
#### Constants ####
## Is technically also the playing field --v
SCREEN_BOUNDARY_RECTANGLE = pygame.Rect(0, 0, 1200, 700)
WINDOW_CAPTION = 'Gravitation version 0.3'
MAX_FRAMES_PER_SECOND = 60
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
#### Classes ####
class Spritesheet:
''' Loads spritesheets for graphical display purposes. '''
## An easily reusable, general-purpose class for working with sprites in pygame!
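    ## Hypothetical usage (file name and rectangles are made up; the display must already
    ## be set up with pygame.display.set_mode() before convert() is called):
    ##     sheet = Spritesheet('example_sheet.bmp')
    ##     one_tile = sheet.get_an_image_from_this_spritesheet((0, 0, 30, 30), colorkey=-1)
    ##     two_tiles = sheet.get_multiple_images_from_this_spritesheet([(0, 0, 30, 30), (30, 0, 30, 30)])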
def __init__(self, supplied_filename):
## Load the file and save it as a variable in this object.
        ## os.path.join() glues the data folder name and the file name into a single path string.
        ## (( join() returns that joined path string, which is exactly what pygame.image.load() expects as its parameter. ))
## Operating system-independent.
## Also, convert() the data to the correct pixel format so pygame can use it quickly.
self.sheet_file = pygame.image.load(os.path.join('folder containing data for GravitationTest', supplied_filename)).convert()
def get_an_image_from_this_spritesheet(self, image_rectangle_measurements, colorkey=None):
''' Create a pygame.Surface() holding a rectangular slice of a spritesheet from specified coordinates; can incorporate transparency. Return that pygame.Surface() '''
## First, turn the numbers describing the rectangular area on the spritesheet into a pygame.Rect() object, which we call the image_slice_rectangle.
image_slice_rectangle = pygame.Rect(image_rectangle_measurements)
## Then create an empty pygame.Surface() and convert() it to the pygame pixel format.
## Uses the image_slice_rectangle as a size, because that's what's going in this buffer!
image_slice_buffer = pygame.Surface(image_slice_rectangle.size)
## Transfer (blit) the area of self.sheet_file (the spritesheet) described by image_slice_rectangle onto the image_slice_buffer ( perfectly matching upper left corners with (0, 0) ).
image_slice_buffer.blit(self.sheet_file, (0, 0), image_slice_rectangle)
## Using transparency:
## By default, the image will not be transparent in any part. That's the colorkey=None in the parameters for this function.
if colorkey is not None:
            if colorkey == -1:
## If we pass -1 as a colorkey, the game will use the color of the pixel in the upper left corner as its transparency-defining color.
## assigned_colorkey_variable = foo_image.get_at((x, y)) gets the color of the pixel at (x, y) in the image it's called on, and returns it as the assigned_colorkey_variable
colorkey = image_slice_buffer.get_at((0, 0))
## Otherwise, colorkey will use whatever argument is passed to it as a user-specified transparency color for this sprite/image slice/thing.
## The flag RLEACCEL does something to make using the resulting image faster.
## v--- from http://www.pygame.org/docs/ref/surface.html
## " The optional flags argument can be set to pygame.RLEACCEL to provide better performance on non accelerated displays. An RLEACCEL Surface will be slower to modify, but quicker to blit as a source. "
image_slice_buffer.set_colorkey(colorkey, pygame.RLEACCEL)
## Note: The returned object is a pygame.Surface()
return image_slice_buffer
def get_multiple_images_from_this_spritesheet(self, supplied_image_rectangle_measurements, colorkey=None):
''' Create a list populated by pygame.Surface() objects -- i.e., images -- made using the get_an_image_from_this_spritesheet(image_rectangle_measurements, colorkey) function. Parameters require one to many rectangles and zero to one colorkeys. '''
## NOTE: The tutorial had colorkey's default set to None, but that did not go over so well with new .bmp sheets for some reason, so I'm forcing it to use the 0,0 colorkey from now on.
## First make an empty list to be filled with images, each of which is a pygame.Surface()
list_of_images = []
## Iterate through the rectangle measurements handed to this function, and append pygame.Surface() objects generated with get_an_image_from_this_spritesheet() to the list_of_images for each one.
for each_rectangle_measurement in supplied_image_rectangle_measurements:
list_of_images.append(self.get_an_image_from_this_spritesheet(each_rectangle_measurement, colorkey))
## Once again, to be clear for beginners like me: This function returns a list full of pygame.Surface() objects.
return list_of_images
class GravityWell(pygame.sprite.Sprite):
''' Planets and stuff. They should both affect and be affected by gravity. '''
## The way this is going down is as follows:
## - Every GravityWell has an x_location and a y_location
## - - This will use self.rect.centerx and self.rect.centery
## - Every GravityWell has an x_velocity and a y_velocity
## - Every GravityWell has a mass
## - Every time update() is called on a GravityWell, it will For-loop over the entire group_of_gravity_wells
## - - If it finds itself while iterating over the group_of_gravity_wells, it will skip itself and go to the next GravityWell
    ## - For every other GravityWell, it will determine the sine and cosine of the angle of the line between the two GravityWells' centers, using the distance between them as the hypotenuse of a right triangle
## - Then it finds another ratio, the inverse_square_ratio, which is equal to ( 1 / (the_distance_between_them ** 2) )
## - It will then multiply its mass value by the inverse_square_ratio, coming to a much smaller number, the gravitic_acceleration_value
## - Then the gravitic_acceleration_value will be multiplied by both the sine and cosine to generate the new x_acceleration and y_acceleration values, depending on which is needed for which.
## - I'll also figure out if multiplication by -1 is needed at that point, if that calculation comes to a repulsive gravity instead of an attractive one.
## NOTE: Because it is not specific to any particular GravityWell, but rather acts on all of them once per RenderUpdates(), calculate_gravity_and_adjust_velocities_on_all_gravity_wells() is a top-level function.
def __init__(self, initial_x_position, initial_y_position, initial_x_velocity, initial_y_velocity, initial_mass, is_immobile=False):
pygame.sprite.Sprite.__init__(self, self.sprite_groups_this_object_is_inside)
## EVEN THOUGH GravityWell doesn't actually accept graphix, all of its inheritor classes will, so this should still go through. Right?
## DEBUG -- skip this, see if it matters to put it in Planet instead of GravityWell
# ## The fact I called pygame.sprite.Sprite.__init__() before self.image.get_rect() means that the inheritor class's image will be initialized. Right??
# self.rect = self.image.get_rect()
self.rect.centerx = initial_x_position
self.rect.centery = initial_y_position
self.current_x_velocity = float(initial_x_velocity)
self.current_y_velocity = float(initial_y_velocity)
self.current_mass = initial_mass
self.is_immobile = is_immobile
## When a GravityWell is spawning in, make sure you float its values for accurate mathings:
self.convert_centerx_and_centery_to_floating_point()
## This feels so, so wrong, but I know it'll work. May all the gods of programming forgive me.
self.x_velocity_buffer = 0.0
self.y_velocity_buffer = 0.0
#debug3
self.convert_centerx_and_centery_to_floating_point()
def convert_centerx_and_centery_to_floating_point(self):
''' Convert self.rect.centerx and self.rect.centery to floating point numbers for smoother ball movement calculations. '''
## Pixels don't come in fractions. This method will be used to switch between floating point numbers and integers on the fly for smoother ball movement, giving more detail to the physics engine.
## (( float() is a built-in Python function, as is int() ))
self.floating_point_rect_centerx = float(self.rect.centerx)
self.floating_point_rect_centery = float(self.rect.centery)
def set_centerx_and_centery_values_to_ints_of_the_floating_point_values(self):
''' Set self.rect.centerx and self.rect.centery to the self.floating_point_rect_centerfoo numbers for each. '''
## Turns the floating point numbers from the above method into ints for pygame's Rect class's various pixel purposes.
## This is a CRITICAL PART of moving things that store their x, y locations as floats!
self.rect.centerx = int(self.floating_point_rect_centerx)
self.rect.centery = int(self.floating_point_rect_centery)
def update(self):
''' In addition to normal update() stuff ((if there is any now that I'm redefining this function)), move the GravityWell object according to its current_x_velocity and current_y_velocity values. '''
## NOTE: "update" implies we are updating THIS OBJECT. Only.
## calculate_gravity_and_adjust_velocities_on_all_gravity_wells() is a top-level function that processes BEFORE RenderUpdates() in the game loop.
#### THIS IS ALSO THE MOVE FUNCTION FOR NOW, POSSIBLY ALSO FOR ALWAYS ####
## The GravityWell shouldn't go out of the playing_field_rectangle, so restrict it to the playing_field_rectangle using pygame's built-in clamp_ip() function, to "clamp in place" (?) the sprite inside the playing_field_rectangle given to its class as a reference variable.
## DEBUG take 3: reenable clamping later
#self.rect.clamp_ip(self.playing_field.playing_field_rectangle)
## Also it needs to be refloated AFTER being clamped. Do not change the order of [[ clamp_ip() --> convert... --> movement code --> set_center... ]]
#debug3
#self.convert_centerx_and_centery_to_floating_point()
## This is the part that seems so horribly wrong.
## The purpose of this disaster is to try and make fractions of a pixel worth of velocity smoothly affect movement.
## The buffer stores clipped values from -1 to +1.
## If abs(current_velocity) < 1:
## add the acceleration to the buffer
## If abs(buffer) > 1:
## add 1 to the_proper_center_axis, AND subtract 1 from the buffer
## If abs(current_velocity) > 1:
## add the buffer to current_acceleration AND zero the buffer
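        ## Worked example of the buffering described above (illustrative numbers): with a
        ## velocity of 0.4 px/frame the buffer grows 0.4, 0.8, 1.2, ...; on the third frame
        ## it crosses 1.0, so the buffer branch below moves the position one whole pixel and
        ## drops the buffer back to 0.2, preserving the fractional remainder.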
if not self.is_immobile:
## x buffer
if (self.current_x_velocity < 1.0) and (self.current_x_velocity > -1.0):
self.x_velocity_buffer += self.current_x_velocity
if (self.x_velocity_buffer >= 1.0) or (self.x_velocity_buffer <= -1.0):
## Velocity buffer overflow. Increment position.
## note: a number divided by the abs. of itself gives either +1 or -1 depending on its original sign (it preserves its sign)
self.floating_point_rect_centerx += (self.x_velocity_buffer / abs(self.x_velocity_buffer))
if (self.x_velocity_buffer / abs(self.x_velocity_buffer)) == -1.0:
self.x_velocity_buffer += 1.0
else:
self.x_velocity_buffer -= 1.0
#self.x_velocity_buffer -= (self.x_velocity_buffer / abs(self.x_velocity_buffer))
            if (self.current_x_velocity > 1.0) or (self.current_x_velocity < -1.0):
self.x_velocity_buffer = 0.0
## y buffer
if (self.current_y_velocity < 1.0) and (self.current_y_velocity > -1.0):
self.y_velocity_buffer += self.current_y_velocity
if (self.y_velocity_buffer >= 1.0) or (self.y_velocity_buffer <= -1.0):
## Velocity buffer overflow. Increment position.
## note: a number divided by the abs. of itself gives either +1 or -1 depending on its original sign (it preserves its sign)
self.floating_point_rect_centery += (self.y_velocity_buffer / abs(self.y_velocity_buffer))
if (self.y_velocity_buffer / abs(self.y_velocity_buffer)) == -1.0:
self.y_velocity_buffer += 1.0
else:
self.y_velocity_buffer -= 1.0
#self.y_velocity_buffer -= (self.y_velocity_buffer / abs(self.y_velocity_buffer))
            if (self.current_y_velocity > 1.0) or (self.current_y_velocity < -1.0):
self.y_velocity_buffer = 0.0
print("\ncurrent_x_velocity == " + str(self.current_x_velocity))
print("current_y_velocity == " + str(self.current_y_velocity))
print("x_velocity_buffer == " + str(self.x_velocity_buffer))
print("y_velocity_buffer == " + str(self.y_velocity_buffer) + "\n")
#### THIS IS THE PART YOU ARE PROBABLY THINKING ABOUT WHEN YOU ARE THINKING ABOUT MOVING ####
## The math affects the FP numbers. Once the math is covered, convert the numbers back to ints.
## immobility check
if self.is_immobile:
pass
else:
self.floating_point_rect_centerx = self.floating_point_rect_centerx + self.current_x_velocity
self.floating_point_rect_centery = self.floating_point_rect_centery + self.current_y_velocity
## Then actually move the thing as needed.
self.set_centerx_and_centery_values_to_ints_of_the_floating_point_values()
if not self.is_immobile:
print("\n\n\ncurrent_x_velocity == " + str(self.current_x_velocity))
print("current_y_velocity == " + str(self.current_y_velocity))
#print("rect.centerx == " + str(self.rect.centerx))
#print("rect.centery == " + str(self.rect.centery))
print("floating_point_rect_centerx == " + str(self.floating_point_rect_centerx))
print("floating_point_rect_centery == " + str(self.floating_point_rect_centery) + "\n")
class Planet(GravityWell):
''' A giant ball of rock and/or gas and/or various slippery substances. Is a GravityWell. '''
## The Planet class is distinct from the GravityWell class because I want to have explorable planets with their own maps once I get the space business sorted out.
def __init__(self, initial_x_position, initial_y_position, initial_x_velocity, initial_y_velocity, initial_mass, supplied_planet_graphic_index, is_immobile=False):
## Image handling goes in the Planet class. GravityWell does not do image handling, BUT it DOES require it to be done.
## The image component of the pygame.sprite.Sprite has to be named "image" for it to interact properly with other Sprite class methods.
self.image = pygame.Surface((30, 30)).convert()
## DEBUG1 got to find out why colorkey isn't working the way I expect
## First fill it with the WHITE constant.
#self.image.fill(WHITE)
## Then set the colorkey to WHITE. For consistency's sake, pick it up off the Surface.
#self.image.set_colorkey(self.image.get_at((0, 0)), pygame.RLEACCEL)
## DEBUG1 it can't be here, right?? is this where colorkey ought to go? It can't be here...!
## DEBUG2 I think it is after the following line. Somehow.
## thing_to_blit_to.blit(sprite_to_blit_to_the_thing, (upperleft_location_to_blit_to_on_thing_you're_blitting_to))
self.image.blit(self.list_full_of_reference_planet_surface_objects[supplied_planet_graphic_index], (0, 0))
## DEBUG2
## Okay, so this has to be called here. BUT for some reason, the colorkey=-1 thing STILL has an effect. Specifically it turns the background black.
## WHY!
self.image.set_colorkey(self.image.get_at((0, 0)), pygame.RLEACCEL)
## This has to be figured out somehow, since it isn't at all how it worked in Arinoid. Hrm.
self.rect = self.image.get_rect()
## IMPORTANT! Only init the GravityWell component AFTER you get the image and rect.
GravityWell.__init__(self, initial_x_position, initial_y_position, initial_x_velocity, initial_y_velocity, initial_mass, is_immobile=is_immobile)
class PlayingField:
''' Define the playing field for the game. '''
## This may be changed or added to later on. Such as when I start having to think about the two different map windows: Aurora and Outpost.
## IMPORTANT: SEE ArinoidTest FOR DEEPEST COMMENTS EXPLAINING THIS CLASS.
##~~ Scaling and centering the tile-based map ~~#
## These values represent the width and height of each tile in the spritesheet:
tile_x_pixel_measurement = 30
tile_y_pixel_measurement = 30
## The number of game-tiles in the playing field's x dimension:
## OLD:
#playing_field_width_in_tiles = 20
## NEW:
playing_field_width_in_tiles = SCREEN_BOUNDARY_RECTANGLE.width // tile_x_pixel_measurement
print("\nplaying_field_width_in_tiles == " + str(playing_field_width_in_tiles))
## y axis:
## OLD:
#playing_field_height_in_tiles = 15
## NEW:
playing_field_height_in_tiles = SCREEN_BOUNDARY_RECTANGLE.height // tile_y_pixel_measurement
print("\nplaying_field_height_in_tiles == " + str(playing_field_height_in_tiles))
top_x_position_offset = ((SCREEN_BOUNDARY_RECTANGLE.width // tile_x_pixel_measurement) * tile_x_pixel_measurement)
top_y_position_offset = ((SCREEN_BOUNDARY_RECTANGLE.height // tile_y_pixel_measurement) * tile_y_pixel_measurement)
## Note: The above operation is NOT the same as floor dividing by 1, which only shaves off fractional components if a number already has them.
top_x_position_offset = ( (SCREEN_BOUNDARY_RECTANGLE.width - top_x_position_offset) // 2 )
top_y_position_offset = ( (SCREEN_BOUNDARY_RECTANGLE.height - top_y_position_offset) // 2 )
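    ## Worked example with the current constants: the 1200x700 screen and 30px tiles give
    ## 40x23 whole tiles; 23 * 30 = 690 leaves 10 leftover pixels in y, so
    ## top_y_position_offset becomes (700 - 690) // 2 = 5 and the grid is centered vertically,
    ## while x divides evenly and top_x_position_offset stays 0.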
playing_field_rectangle = pygame.Rect((tile_x_pixel_measurement + top_x_position_offset), (tile_y_pixel_measurement + top_y_position_offset), (tile_x_pixel_measurement * playing_field_width_in_tiles), (tile_y_pixel_measurement * playing_field_height_in_tiles))
def __init__(self):
## Going to keep with the .convert() on all the new Surfaces, to keep everything running fast, supposedly.
self.playing_field_background_surface_object = pygame.Surface(SCREEN_BOUNDARY_RECTANGLE.size).convert()
def draw_tile(self, supplied_tile_surface, x_tile_value, y_tile_value):
''' Use tile values provided to the PlayingField class at program initialization to display a single PlayingField tile at a specified location on the PlayingField's background_surface_object. '''
## First, multiply the x and y tile placement values by the size of each tile in pixels.
x_placement_upperleft_offset_in_pixels = (self.tile_x_pixel_measurement * x_tile_value)
y_placement_upperleft_offset_in_pixels = (self.tile_y_pixel_measurement * y_tile_value)
## Then add the border that was determined in the base Arena class values:
x_placement_upperleft_offset_in_pixels += self.top_x_position_offset
y_placement_upperleft_offset_in_pixels += self.top_y_position_offset
## Transfer (blit) the supplied_tile_surface to the background_surface_object at the above-determined location.
self.playing_field_background_surface_object.blit(supplied_tile_surface, (x_placement_upperleft_offset_in_pixels, y_placement_upperleft_offset_in_pixels))
def make_background_using_one_tile_graphic(self, tile_image_index_number):
''' Creates the playing field background out of tiles with the specified tile image index number. '''
## NOTE: This assumes the background will have actual tile graphics at some point in teh Majickal Futurez one one ! spelling error self-conscious nervous laughter.
## NOTE: This function creates the background for the ENTIRE playing field in ONE pass. Using ONE tile index number.
for x in range(self.playing_field_width_in_tiles): # "For each row"
for y in range(self.playing_field_height_in_tiles): # "For each column"
## OLD ---v
## Note the + 1 in the line below -- that's because there's a one-tile-wide border around the entire PlayingField.
#self.draw_tile(self.list_full_of_reference_tile_surface_objects[tile_image_index_number], x + 1, y + 1)
## NEW ---v
## I removed the 1-tile border.
self.draw_tile(self.list_full_of_reference_tile_surface_objects[tile_image_index_number], x, y)
#### Functions ####
def calculate_gravity_and_adjust_velocities_on_all_gravity_wells(supplied_group_of_all_gravity_wells):
for each_gravity_well_object in supplied_group_of_all_gravity_wells:
for each_other_object in supplied_group_of_all_gravity_wells:
## This prevents things from trying to gravitify themselves:
if each_gravity_well_object == each_other_object:
## Print testing proves this check works perfectly.
pass
else:
## With that out of the way...
## Gravity is like acceleration towards the other object's center of mass.
                ## Note: In order for orbits to look cool, we have to figure out what value of acceleration is necessary to make things circle around each other at some distance. This is very much a test-and-check thing until I know the logic better.
## First, get the hypotenuse between their two centerpoints, as if it was a right triangle:
adjacent = (each_gravity_well_object.floating_point_rect_centerx - each_other_object.floating_point_rect_centerx)
opposite = (each_gravity_well_object.floating_point_rect_centery - each_other_object.floating_point_rect_centery)
#print("adjacent == " + str(adjacent))
#print("opposite == " + str(opposite))
## (( NOTE!! Consider making this run faster. To save computing resources, try to only sqrt once (and only if necessary). ))
                ## (( This means try an escape check for point1 == point2 (identical location)... keeping the number unsqrted until you are done with the calcs... etc. ))
distance_between_these_two_objects_as_a_hypotenuse = math.sqrt((adjacent ** 2) + (opposite ** 2))
#print("distance_between_these_two_objects_as_a_hypotenuse == " + str(distance_between_these_two_objects_as_a_hypotenuse) + "\n")
## Dummy code:
#if adjacent >= 0.0:
# each_other_object.current_x_velocity += 0.01
#elif adjacent < 0.0:
# each_other_object.current_x_velocity += -0.01
#if opposite >= 0.0:
# each_other_object.current_y_velocity += 0.01
#elif opposite < 0.0:
# each_other_object.current_y_velocity += -0.01
## THE DUMMY CODE WORKS BEYOND MY WILDEST DREAMS.
#print("each_other_object.current_x_velocity == " + str(each_other_object.current_x_velocity))
#print("each_other_object.current_y_velocity == " + str(each_other_object.current_y_velocity))
## If things are going in straight lines, it's because you're not actually making smoothly curved accelerations.
## All those 0.01 constants ARE THE STRAIGHT LINES. :P
## Got to use the inverse square law and the mass values to figure out what to change their velocities by.
the_inverse_square_law_ratio_of_these_two_objects = ( 1 / (distance_between_these_two_objects_as_a_hypotenuse ** 2) )
the_sine = adjacent / distance_between_these_two_objects_as_a_hypotenuse
the_cosine = opposite / distance_between_these_two_objects_as_a_hypotenuse
## Debug: take 2
## Let's try dividing the sine and cosine by their sum, to get the ratios of sines and cosines per sinecosine.
## Reason for this is just like in Asteroids. Sine and cosine are ~.71 at 45 degrees, summing to 1.42, but we need something that will modify a velocity vector without stretching it further out with extra hypotenuse length.
sum_of_sine_and_cosine = abs(the_sine) + abs(the_cosine)
## The part after the * is the way we preserve the sign of the original sine and cosine. It's dividing the number by its positive version. See: [-1 / 1 = -1] and [1 / 1 = 1]
the_sine, the_cosine = ((abs(the_sine) / sum_of_sine_and_cosine) * (the_sine / abs(the_sine))), ((abs(the_cosine) / sum_of_sine_and_cosine) * (the_cosine / abs(the_cosine)))
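                ## Worked check of the normalization above (illustrative numbers): with
                ## adjacent == opposite == 10 the hypotenuse is ~14.14, both raw ratios are
                ## ~0.707 and sum to ~1.414, so each normalized ratio becomes 0.5 and the two
                ## axis components together carry exactly one current_acceleration_value
                ## worth of speed change.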
print("\n\nthe_sine == " + str(the_sine))
print("the_cosine == " + str(the_cosine))
print("the_inverse_square_law_ratio == " + str(the_inverse_square_law_ratio_of_these_two_objects))
current_acceleration_value = (each_gravity_well_object.current_mass * each_other_object.current_mass) * the_inverse_square_law_ratio_of_these_two_objects
print("current_acceleration_value == " + str(current_acceleration_value))
## Okay, this seems to be the real problem.
## current_acceleration_value is not supposed to be assigned to BOTH x and y velocities. It's the hypotenuse!!! -_-
## debug: take 1
## Should I just multiply it by the sine and cosine for x and y, respectively?
                ## Conventionally sine is opposite / hypotenuse and cosine is adjacent / hypotenuse, so the names above are swapped relative to the usual definitions.
                ## That doesn't change the result: what matters is that the x-axis ratio (adjacent / hypotenuse) scales the x velocity and the y-axis ratio scales the y velocity.
                ## If the x ratio is x_difference / hypotenuse, then multiplying the new hypotenuse-like value (current_acceleration_value) by it should yield the right x component.
## I think it also means that the sign will be preserved and won't have to be added in, as in the old code below:
## debug: take 1: OLD CODE
#if adjacent > 0.0:
# each_other_object.current_x_velocity += ((current_acceleration_value_ * 50)
#elif adjacent < 0.0:
# each_other_object.current_x_velocity += (-current_acceleration_value * 50)
#if opposite > 0.0:
# each_other_object.current_y_velocity += (current_acceleration_value * 50)
#elif opposite < 0.0:
# each_other_object.current_y_velocity += (-current_acceleration_value * 50)
## Also I'm adding a constant to it, to make it a noticeable number. Sine and cosine are ratios, and can get pretty small sometimes.
## debug: take 1: NEW CODE
## debug: take 2: removing the * 50 constant to see if that causes the jumping itself... or if there's something even stranger going on?
each_other_object.current_x_velocity += ((the_sine) * current_acceleration_value)
each_other_object.current_y_velocity += ((the_cosine) * current_acceleration_value)
#### The Main Program Function ####
def main():
''' The game's main function. Does initialization and main-looping. '''
#### Initialization ####
## Initialize pygame before using it.
pygame.init()
## Initialize the screen, using the boundary rectangle for the screen size.
screen = pygame.display.set_mode(SCREEN_BOUNDARY_RECTANGLE.size)
## Put a title on the window's movement bar:
pygame.display.set_caption(WINDOW_CAPTION)
##~~ Init the Spritesheet ~~##
## Make an instance of the game's spritesheet. This is the graphics of the entire game.
the_main_spritesheet_object = Spritesheet('gravitation_test_master_spritesheet.gif')
## And also set the graphics indices to point to specific parts of that spritesheet.
PlayingField.list_full_of_reference_tile_surface_objects = the_main_spritesheet_object.get_multiple_images_from_this_spritesheet([ (1, 1, 30, 30) # 0 - should look like boring old stars, for now
])
## Planet grafix
Planet.list_full_of_reference_planet_surface_objects = the_main_spritesheet_object.get_multiple_images_from_this_spritesheet([ (32, 1, 30, 30)], colorkey=-1) # 0 - this is
## Create the background, which for now is just a sheet of blackness.
## Setting up a class-based system for this purpose early, so it can be interacted with later on.
the_playing_field_object = PlayingField() # Create the PlayingField object that the game takes place inside/infront of.
the_playing_field_object.make_background_using_one_tile_graphic(0) # (0) is the list_full_of_reference_tile_surface_objects[] index number of the tile that the background will be covered in.
GravityWell.playing_field = the_playing_field_object
##~~ Create the background ~~##
## This section is intended to be temporary while I get the game figured out.
## Though it might end up being efficient to keep nearly all of the PlayingField code anyways.
## Put it up on the screen even before the first RenderUpdates() call:
screen.blit(the_playing_field_object.playing_field_background_surface_object, (0, 0)) # See the Arena class for details on the first parameter. The second is the upperleft alignment with the screen Surface() object's upperleft; the border-tweaking math comes after this in code execution and is not directly reflected in this particular pair of zeroes.
pygame.display.update()
##~~ Keep track of sprites ~~##
## First make the Groups.
## RenderUpdates() is special to pygame's sprite handling.
group_of_all_sprites = pygame.sprite.RenderUpdates()
## Group()s are for various iterations and assignments not native to pygame.
group_of_gravity_wells = pygame.sprite.Group()
group_of_planets = pygame.sprite.Group()
## Second put the things in the Groups.
GravityWell.sprite_groups_this_object_is_inside = group_of_all_sprites, group_of_gravity_wells
## Uhh... Does having Planet as a subclass of GravityWell, and both Planet and GravityWell being in the group_of_all_sprites...
## mean that every Planet object will be RenderUpdates()'d twice per game loop?
## That would throw a huge monkey wrench in my assumptions about good class structuring!
    ## pygame handles this: a sprite can only be added to a given Group once, so RenderUpdates() draws each sprite once per loop regardless of multi-group inclusion.
Planet.sprite_groups_this_object_is_inside = group_of_all_sprites, group_of_gravity_wells, group_of_planets
## Whatever. It looks like it works, so I guess my guess was correct, and this IS how it works. Huzzah?
##~~ Keep track of time ~~##
## Initialize the clock object, used to cap the framerate / to meter the program's temporal progression.
clock = pygame.time.Clock()
##~~ The Game Loop ~~##
## The game loop can be exited by using a return statement.
while 1:
#~ Get input ~#
for event in pygame.event.get():
## Quit events:
## v-- clicking the X v--- A convenient device for splitting conditionals over multiple lines!
if event.type == pygame.QUIT \
or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
## hitting the esc key --^
return
if not group_of_planets:
Planet(500, 250, 0.4, -0.4, 1, 0)
Planet(600, 350, 0, 0, 155, 0, is_immobile=True) ## AHAH! Things need INERTIA! Mass should impart resistance to changes in velocity... Just try and make this is_immobile=False and see how it shoots off in a tango with that tiny planet, right away! Inertia should make this look much more realistic, I'd say.
## ... possibly... inertia will end up equating to some kind of ratio that makes Force from other GravityWells adjust its Velocity less per check.
## So relative to other GravityWells, things with inertia/mass/whatever it is will appear to change their movements more slowly.
## But I doubt that will automatically translate into speedy orbital stabilization.
## There is probably going to have to be a special algorithm for generating stable orbits. But if you skip straight to stable orbits, why not just make rotating circles? =\ It's true there would be elaborate orbits that look like satellite constellations... But I want to be able to change orbits mid-game!
Planet(700, 450, -0.4, 0.4, 1, 0)
Planet(550, 300, 0.2, -0.2, 0.1, 0)
#~ Clear sprites ~#
## NOTE: The playing_field_background_surface_object should just be a giant black surface equal to the size of the screen, for now.
group_of_all_sprites.clear(screen, the_playing_field_object.playing_field_background_surface_object)
#~ Update ~#
## Step one: The Gravitationating.
calculate_gravity_and_adjust_velocities_on_all_gravity_wells(group_of_gravity_wells)
## Step two: Rendermoving.
group_of_all_sprites.update()
#~ Redraw ~#
## "Dirty" rectangles are when you only update things that changed, rather than the entire screen. These are the regions that have to be redrawn.
## [...] v--- Returns a list of the regions which changed since the last update()
dirty_rectangles = group_of_all_sprites.draw(screen)
## Once we have the changed regions, we can update specifically those:
pygame.display.update(dirty_rectangles)
#~ Cap frame rate ~#
clock.tick(MAX_FRAMES_PER_SECOND)
#### Running the Program ####
## This line runs the game when the program is called up.
if __name__ == '__main__': main()
|
|
"""
Utilities to handle projects' structure
"""
import os
import re
from pathlib import Path
from typing import List, Optional
from controller import (
BACKUP_DIR,
DATA_DIR,
LOGS_FOLDER,
PROJECT_DIR,
SUBMODULES_DIR,
log,
print_and_exit,
)
from controller.utilities import git
NO_AUTHENTICATION = "NO_AUTHENTICATION"
NO_FRONTEND = "nofrontend"
ANGULAR = "angular"
GITKEEP = ".gitkeep"
class Project:
def __init__(self) -> None:
self.expected_main_folders: List[Path] = [PROJECT_DIR, DATA_DIR, SUBMODULES_DIR]
        # Will be verified by check and added by create
self.expected_folders: List[Path] = []
self.expected_files: List[Path] = []
# Copied as they are, no templating (used for binary files, like images)
self.raw_files: List[Path] = []
        # Intended to be immutable; check will raise a warning when they differ
self.fixed_files: List[Path] = []
# Not verified, added by create if --add-optionals
self.optionals_folders: List[Path] = []
self.optionals_files: List[Path] = []
# Created in data if missing
self.data_folders: List[Path] = []
self.data_files: List[Path] = []
        # check will raise an error if these files are found
self.obsolete_files: List[Path] = []
self.suggested_gitkeep: List[Path] = []
def p_path(self, *args: str) -> Path:
return PROJECT_DIR.joinpath(self.project, *args)
def load_project_scaffold(
self, project: str, auth: Optional[str], services: Optional[List[str]] = None
) -> bool:
if services is None:
services = []
self.project = project
self.expected_folders.extend(self.expected_main_folders)
self.expected_folders.append(self.p_path("confs"))
self.expected_folders.append(self.p_path("builds"))
self.expected_folders.append(self.p_path("backend"))
self.expected_folders.append(self.p_path("backend", "endpoints"))
self.expected_folders.append(self.p_path("backend", "models"))
self.expected_folders.append(self.p_path("backend", "models", "emails"))
self.expected_folders.append(self.p_path("backend", "tasks"))
self.expected_folders.append(self.p_path("backend", "tests"))
self.expected_folders.append(self.p_path("backend", "cron"))
self.suggested_gitkeep.append(SUBMODULES_DIR.joinpath(GITKEEP))
self.suggested_gitkeep.append(DATA_DIR.joinpath(GITKEEP))
self.suggested_gitkeep.append(LOGS_FOLDER.joinpath(GITKEEP))
self.suggested_gitkeep.append(BACKUP_DIR.joinpath(GITKEEP))
self.suggested_gitkeep.append(self.p_path("backend", "cron", GITKEEP))
self.suggested_gitkeep.append(self.p_path("builds", GITKEEP))
self.suggested_gitkeep.append(self.p_path("backend", "endpoints", GITKEEP))
self.suggested_gitkeep.append(self.p_path("backend", "tasks", GITKEEP))
self.suggested_gitkeep.append(self.p_path("backend", "tests", GITKEEP))
self.suggested_gitkeep.append(
self.p_path("backend", "models", "emails", GITKEEP)
)
self.expected_files.append(self.p_path("project_configuration.yaml"))
self.expected_files.append(self.p_path("confs", "commons.yml"))
self.expected_files.append(self.p_path("confs", "development.yml"))
self.expected_files.append(self.p_path("confs", "production.yml"))
        # Needed to ensure that mypy correctly extracts typing from the project module
self.expected_files.append(self.p_path("backend", "__init__.py"))
self.expected_files.append(self.p_path("backend", "initialization.py"))
self.expected_files.append(self.p_path("backend", "customization.py"))
self.expected_files.append(self.p_path("backend", "endpoints", "__init__.py"))
self.expected_files.append(self.p_path("backend", "models", "__init__.py"))
self.expected_files.append(self.p_path("backend", "tasks", "__init__.py"))
self.expected_files.append(self.p_path("backend", "tests", "__init__.py"))
self.expected_files.append(Path(".gitignore"))
self.expected_files.append(Path(".gitattributes"))
self.expected_files.append(Path(".isort.cfg"))
self.expected_files.append(Path("pyproject.toml"))
self.expected_files.append(Path(".flake8"))
self.expected_files.append(Path(".prettierignore"))
self.fixed_files.append(Path(".gitattributes"))
self.fixed_files.append(Path("pyproject.toml"))
if auth or services:
models = self.p_path("backend", "models")
if auth == "sqlalchemy" or "postgres" in services or "mysql" in services:
self.expected_files.append(models.joinpath("sqlalchemy.py"))
if auth == "neo4j" or "neo4j" in services:
self.expected_files.append(models.joinpath("neo4j.py"))
if auth == "mongo" or "mongo" in services:
self.expected_files.append(models.joinpath("mongo.py"))
self.optionals_folders.append(self.p_path("backend", "models", "emails"))
self.optionals_files.append(
self.p_path("backend", "models", "emails", "activate_account.html")
)
self.optionals_files.append(
self.p_path("backend", "models", "emails", "new_credentials.html")
)
self.optionals_files.append(
self.p_path("backend", "models", "emails", "reset_password.html")
)
self.optionals_files.append(
self.p_path("backend", "models", "emails", "update_credentials.html")
)
self.optionals_files.append(Path("codecov.yml"))
self.data_folders.extend(
[
LOGS_FOLDER,
BACKUP_DIR,
DATA_DIR.joinpath("uploads"),
]
)
# Removed since 0.7.1
self.obsolete_files.append(self.p_path("confs", "debug.yml"))
# Removed since 0.7.4
self.obsolete_files.append(SUBMODULES_DIR.joinpath("rapydo-confs"))
# Removed since 0.7.5
self.obsolete_files.append(SUBMODULES_DIR.joinpath("frontend"))
# Removed since 0.7.6
self.obsolete_files.append(self.p_path("backend", "apis"))
# Removed since 0.8
self.obsolete_files.append(self.p_path("backend", "models", "swagger.yaml"))
self.obsolete_files.append(self.p_path("backend", "endpoints", "profile.py"))
# Removed since 0.9
self.obsolete_files.append(self.p_path("backend", "initialization"))
self.obsolete_files.append(self.p_path("frontend", "assets", "favicon.ico"))
# Removed since 1.2
self.obsolete_files.append(Path(".pre-commit-config.yaml"))
return True
def load_frontend_scaffold(self, frontend: Optional[str]) -> bool:
self.frontend = frontend
if self.frontend is None or self.frontend == NO_FRONTEND:
log.debug("No frontend framework enabled")
return False
self.expected_folders.append(self.p_path("frontend"))
if self.frontend == ANGULAR:
self.expected_folders.extend(
[
self.p_path("frontend", "app"),
self.p_path("frontend", "styles"),
self.p_path("frontend", "integration"),
self.p_path("frontend", "assets"),
self.p_path("frontend", "assets", "favicon"),
]
)
self.suggested_gitkeep.append(
DATA_DIR.joinpath(self.project, "frontend", GITKEEP)
)
self.suggested_gitkeep.append(
self.p_path("frontend", "integration", GITKEEP)
)
self.expected_files.extend(
[
self.p_path("frontend", "package.json"),
self.p_path("frontend", "styles", "style.scss"),
self.p_path("frontend", "styles", "variables.scss"),
self.p_path("frontend", "app", "customization.ts"),
self.p_path("frontend", "app", "custom.module.ts"),
self.p_path("frontend", "app", "custom.navbar.ts"),
self.p_path("frontend", "app", "custom.footer.ts"),
self.p_path("frontend", "app", "custom.profile.ts"),
self.p_path("frontend", "app", "custom.navbar.links.html"),
self.p_path("frontend", "app", "custom.navbar.brand.html"),
self.p_path("frontend", "app", "custom.footer.html"),
self.p_path("frontend", "app", "custom.profile.html"),
self.p_path("frontend", "app", "types.ts"),
]
)
self.raw_files.extend(
[
# Generated with https://realfavicongenerator.net
self.p_path(
"frontend", "assets", "favicon", "android-chrome-192x192.png"
),
self.p_path("frontend", "assets", "favicon", "browserconfig.xml"),
self.p_path("frontend", "assets", "favicon", "favicon-32x32.png"),
self.p_path("frontend", "assets", "favicon", "mstile-150x150.png"),
self.p_path(
"frontend", "assets", "favicon", "safari-pinned-tab.svg"
),
self.p_path(
"frontend", "assets", "favicon", "apple-touch-icon.png"
),
self.p_path("frontend", "assets", "favicon", "favicon-16x16.png"),
self.p_path("frontend", "assets", "favicon", "favicon.ico"),
self.p_path("frontend", "assets", "favicon", "site.webmanifest"),
]
)
frontend_data_dir = DATA_DIR.joinpath(self.project, "frontend")
self.data_folders.extend(
[
frontend_data_dir,
frontend_data_dir.joinpath("app"),
frontend_data_dir.joinpath("node_modules"),
DATA_DIR.joinpath(self.project, "karma"),
DATA_DIR.joinpath(self.project, "cypress"),
]
)
self.data_files.extend(
[
frontend_data_dir.joinpath("angular.json"),
frontend_data_dir.joinpath("karma.conf.js"),
frontend_data_dir.joinpath("package.json"),
frontend_data_dir.joinpath("polyfills.ts"),
frontend_data_dir.joinpath("tsconfig.json"),
frontend_data_dir.joinpath("tsconfig.app.json"),
frontend_data_dir.joinpath("tsconfig.spec.json"),
frontend_data_dir.joinpath("tsconfig.server.json"),
frontend_data_dir.joinpath("cypress.json"),
]
)
self.obsolete_files.extend(
[
self.p_path("frontend", "app", "app.routes.ts"),
self.p_path("frontend", "app", "app.declarations.ts"),
self.p_path("frontend", "app", "app.providers.ts"),
self.p_path("frontend", "app", "app.imports.ts"),
self.p_path("frontend", "app", "app.custom.navbar.ts"),
self.p_path("frontend", "app", "app.custom.navbar.html"),
self.p_path("frontend", "app", "app.entryComponents.ts"),
self.p_path("frontend", "app", "app.home.ts"),
self.p_path("frontend", "app", "app.home.html"),
self.p_path("frontend", "app", "custom.declarations.ts"),
self.p_path("frontend", "app", "custom.routes.ts"),
self.p_path("frontend", "app", "custom.project.options.ts"),
# Removed since 1.0
frontend_data_dir.joinpath("browserslist"),
# Removed since 1.2 (replaced with scss in styles)
self.p_path("frontend", "css"),
]
)
return True
@staticmethod
def get_project(project: Optional[str], ignore_multiples: bool = False) -> str:
projects = os.listdir(PROJECT_DIR)
if project is None:
if len(projects) == 0:
print_and_exit("No project found (is {} folder empty?)", PROJECT_DIR)
if len(projects) > 1:
                # This is used by the preliminary project lookup that loads the commands.
# In case of multiple projects without a proper definition in
# projectrc, the custom commands will not be loaded
if ignore_multiples:
return ""
print_and_exit(
"Multiple projects found, "
"please use --project to specify one of the following: {}",
", ".join(projects),
)
project = projects.pop()
elif project not in projects:
print_and_exit(
"Wrong project {}\nSelect one of the following: {}\n",
project,
", ".join(projects),
)
# In case of errors this function will exit
Project.check_invalid_characters(project)
if project in Project.reserved_project_names:
print_and_exit(
"You selected a reserved name, invalid project name: {}", project
)
return project
@staticmethod
def check_invalid_characters(project: str) -> None:
if len(project) <= 1:
print_and_exit("Wrong project name, expected at least two characters")
if not re.match("^[a-z][a-z0-9]+$", project):
# First character is expected to be a-z
tmp_str = re.sub("[a-z]", "", project[0])
# Other characters are expected to be a-z 0-9
tmp_str += re.sub("[a-z0-9]", "", project[1:])
invalid_list = list(set(tmp_str))
invalid_list.sort()
invalid_chars = "".join(str(e) for e in invalid_list)
print_and_exit(
"Wrong project name, found invalid characters: {}", invalid_chars
)
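    # Hypothetical example of the check above: a name like "My_prj-1" fails the
    # regex, the leftover characters after stripping the allowed ones are
    # "M", "_" and "-", and print_and_exit reports: found invalid characters: -M_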
def check_main_folder(self) -> Optional[str]:
folder = Path.cwd()
first_level_error = self.inspect_main_folder(folder)
# No error raised: the current folder is a valid rapydo root
if first_level_error is None:
return None
# Errors on the current folder, let's verify parents
num_iterations = 0
while str(folder) != "/" and num_iterations < 10:
folder = folder.parent
num_iterations += 1
# Errors at this level, let's continue to verify parents
if self.inspect_main_folder(folder) is not None:
continue
# You found a rapydo folder among your parents!
# Let's suggest to change dir
# This is ../../etc
relative_path = "/".join([".."] * num_iterations)
return (
"You are not in the main folder, please change your working dir"
f"\nFound a valid parent folder: {folder}"
f"\nSuggested command: cd {relative_path}"
)
return first_level_error
def inspect_main_folder(self, folder: Path) -> Optional[str]:
"""
        RAPyDo commands only work on rapydo projects; we want to ensure that
        the current folder has a rapydo-like structure. These checks are based
        on file existence. Further checks are performed in the following steps.
"""
r = git.get_repo(str(folder))
if r is None or git.get_origin(r) is None:
return f"""You are not in a git repository
\nPlease note that this command only works from inside a rapydo-like repository
Verify that you are in the right folder, now you are in: {Path.cwd()}
"""
for fpath in self.expected_main_folders:
if not folder.joinpath(fpath).is_dir():
return f"""Folder not found: {fpath}
\nPlease note that this command only works from inside a rapydo-like repository
Verify that you are in the right folder, now you are in: {Path.cwd()}
"""
return None
def inspect_project_folder(self) -> None:
for fpath in self.expected_folders:
if not fpath.is_dir():
print_and_exit(
"Project {} is invalid: required folder not found {}",
self.project,
fpath,
)
for fpath in self.expected_files + self.raw_files:
if not fpath.is_file():
print_and_exit(
"Project {} is invalid: required file not found {}",
self.project,
fpath,
)
for fpath in self.obsolete_files:
if fpath.exists():
print_and_exit(
"Project {} contains an obsolete file or folder: {}",
self.project,
fpath,
)
# issues/57
    # I'm temporarily here... to be decided how to handle me
reserved_project_names = [
"abc",
"attr",
"base64",
"celery",
"click",
"collections",
"datetime",
"dateutil",
"email",
"errno",
"flask",
"flask_restful",
"flask_sqlalchemy",
"authlib",
"functools",
"glob",
"hashlib",
"hmac",
"inspect",
"io",
"json",
"jwt",
"logging",
"neo4j",
"neomodel",
"os",
"platform",
"pickle",
"plumbum",
"pymodm",
"pymongo",
"pyotp",
"pyqrcode",
"pytz",
"random",
"re",
"smtplib",
"socket",
"sqlalchemy",
"string",
"submodules",
"sys",
"time",
"test",
"unittest",
"werkzeug",
]
|
|
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
import datetime
import os
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
def read_dump(file_name, addrs, script_addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_comments = []
found_legacy_addr = 0
found_p2sh_segwit_addr = 0
found_bech32_addr = 0
found_script_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
line = line.strip()
if not line:
continue
if line[0] == '#':
found_comments.append(line)
else:
# split out some data
key_date_label, comment = line.split("#")
key_date_label = key_date_label.split(" ")
# key = key_date_label[0]
date = key_date_label[1]
keytype = key_date_label[2]
imported_key = date == '1970-01-01T00:00:01Z'
if imported_key:
# Imported keys have multiple addresses, no label (keypath) and timestamp
# Skip them
continue
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdseed=1":
# ensure the old master is still available
assert hd_master_addr_old == addr
elif keytype == "hdseed=1":
# ensure we have generated a new hd master key
assert hd_master_addr_old != addr
hd_master_addr_ret = addr
elif keytype == "script=1":
# scripts don't have keypaths
keypath = None
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr.split(",")[0] and addrObj['hdkeypath'] == keypath and keytype == "label=":
if addr.startswith('m') or addr.startswith('n'):
# P2PKH address
found_legacy_addr += 1
elif addr.startswith('2'):
# P2SH-segwit address
found_p2sh_segwit_addr += 1
elif addr.startswith('bcrt1'):
found_bech32_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
# count scripts
for script_addr in script_addrs:
if script_addr == addr.rstrip() and keytype == "script=1":
found_script_addr += 1
break
return found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-keypool=90", "-addresstype=legacy"]]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
def run_test(self):
self.nodes[0].createwallet("dump")
wallet_unenc_dump = os.path.join(self.nodes[0].datadir, "wallet.unencrypted.dump")
wallet_enc_dump = os.path.join(self.nodes[0].datadir, "wallet.encrypted.dump")
# generate 30 addresses to compare against the dump
# - 10 legacy P2PKH
# - 10 P2SH-segwit
# - 10 bech32
test_addr_count = 10
addrs = []
for address_type in ['legacy', 'p2sh-segwit', 'bech32']:
for _ in range(test_addr_count):
addr = self.nodes[0].getnewaddress(address_type=address_type)
vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath
addrs.append(vaddr)
# Test scripts dump by adding a 1-of-1 multisig address
multisig_addr = self.nodes[0].addmultisigaddress(1, [addrs[1]["address"]])["address"]
# Refill the keypool. getnewaddress() refills the keypool *before* taking a key from
# the keypool, so the final call to getnewaddress leaves the keypool with one key below
# its capacity
self.nodes[0].keypoolrefill()
self.log.info('Mine a block one second before the wallet is dumped')
dump_time = int(time.time())
self.nodes[0].setmocktime(dump_time - 1)
self.nodes[0].generate(1)
self.nodes[0].setmocktime(dump_time)
dump_time_str = '# * Created on {}Z'.format(
datetime.datetime.fromtimestamp(
dump_time,
tz=datetime.timezone.utc,
).replace(tzinfo=None).isoformat())
dump_best_block_1 = '# * Best block at time of backup was {} ({}),'.format(
self.nodes[0].getblockcount(),
self.nodes[0].getbestblockhash(),
)
dump_best_block_2 = '# mined on {}Z'.format(
datetime.datetime.fromtimestamp(
dump_time - 1,
tz=datetime.timezone.utc,
).replace(tzinfo=None).isoformat())
self.log.info('Dump unencrypted wallet')
result = self.nodes[0].dumpwallet(wallet_unenc_dump)
assert_equal(result['filename'], wallet_unenc_dump)
found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(wallet_unenc_dump, addrs, [multisig_addr], None)
assert '# End of dump' in found_comments # Check that file is not corrupt
assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on')))
assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block')))
assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on')))
assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_script_addr, 1) # all scripts must be in the dump
        assert_equal(found_addr_chg, 0)  # 0 blocks were mined
assert_equal(found_addr_rsv, 90 * 2) # 90 keys plus 100% internal keys
# encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
self.nodes[0].walletpassphrase('test', 100)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(wallet_enc_dump)
found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \
read_dump(wallet_enc_dump, addrs, [multisig_addr], hd_master_addr_unenc)
assert '# End of dump' in found_comments # Check that file is not corrupt
assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on')))
assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block')))
assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on')))
assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_script_addr, 1)
assert_equal(found_addr_chg, 90 * 2) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 90 * 2)
# Overwriting should fail
assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump))
# Restart node with new wallet, and test importwallet
self.restart_node(0)
self.nodes[0].createwallet("w2")
# Make sure the address is not IsMine before import
result = self.nodes[0].getaddressinfo(multisig_addr)
assert not result['ismine']
self.nodes[0].importwallet(wallet_unenc_dump)
# Now check IsMine is true
result = self.nodes[0].getaddressinfo(multisig_addr)
assert result['ismine']
self.log.info('Check that wallet is flushed')
with self.nodes[0].assert_debug_log(['Flushing wallet.dat'], timeout=20):
self.nodes[0].getnewaddress()
if __name__ == '__main__':
WalletDumpTest().main()
|
|
import re
import itertools
from pprint import pprint
import maya.cmds as cmds
import pymel.util as util
import pymel.util.trees as trees
import pymel.api as api
import pymel.internal.cmdcache as cmdcache
import pymel.internal.apicache as apicache
import pymel.internal.factories as factories
#===============================================================================
# maya node type hierarchy info
#===============================================================================
cmds.file(new=1, f=1)
lsTypes = cmds.ls(nodeTypes=1)
num = len(lsTypes)
lsTypes = set(lsTypes)
assert num == len(lsTypes), "The result of ls(nodeTypes=1) contained duplicates"
print num
print 'got ls(nodeTypes=1), confirmed no dupes'
realTypes = lsTypes
try:
allTypesReal = cmds.allNodeTypes()
except RuntimeError:
print "Error calling allNodeTypes() !!"
allTypesReal = None
realAndAbstract = lsTypes
abstractTypes = None
else:
num = len(allTypesReal)
allTypesReal = set(allTypesReal)
assert num == len(allTypesReal), "The result of allNodeTypes() contained duplicates"
print num
print 'got allNodeTypes(), confirmed no dupes'
assert lsTypes == allTypesReal, "ls(nodeTypes=1) and allNodeTypes() returned different result"
print 'confirmed allNodeTypes() == ls(nodeTypes=1)'
abstractSuffix = ' (abstract)'
rawRealAndAbstract = cmds.allNodeTypes(includeAbstract=True)
realAndAbstract = set()
for x in rawRealAndAbstract:
if x.endswith(abstractSuffix):
x = x[:-len(abstractSuffix)]
assert x not in realAndAbstract
realAndAbstract.add(x)
abstractTypes = realAndAbstract - realTypes
assert len(abstractTypes) + len(realTypes) == len(realAndAbstract)
print 'got allNodeTypes(includeAbstract=True), separated nodes into real + abstract'
# TODO - make and load a plugin which makes one of every possible plugin node
# type...
# with 2012, we have nodeType(isTypeName), so no longer need to add nodes!
#print 'about to make nodes...'
#mobjDict = {}
#dagMod = api.MDagModifier()
#dgMod = api.MDGModifier()
#for nodeType in realTypes:
# #print 'making nodeType %s...' % nodeType,
# mobjDict[nodeType] = apicache._makeDgModGhostObject(nodeType, dagMod, dgMod)
# #print 'success!'
#dagMod.doIt()
#dgMod.doIt()
#
#print 'made nodes!'
#
#nodeDict = {}
#mfnDag = api.MFnDagNode()
#mfnDep = api.MFnDependencyNode()
#nonMelQueryableApiTypes = [api.MFn.kManipContainer, api.MFn.kManip2DContainer,
# api.MFn.kManipulator3D, api.MFn.kManipulator2D,
# api.MFn.kPolyToolFeedbackShape]
#nonMelQueryableTypes = set()
#melQueryableTypes = set()
#dagTypes = set()
#depTypes = set()
#for nodeType, mobj in mobjDict.iteritems():
# if mfnDag.hasObj(mobj):
# dagTypes.add(nodeType)
# mfnDag.setObject(mobj)
# nodeDict[nodeType] = mfnDag.fullPathName()
# else:
# depTypes.add(nodeType)
# mfnDep.setObject(mobj)
# nodeDict[nodeType] = mfnDep.name()
# for manipApi in nonMelQueryableApiTypes:
# if mobj.hasFn(manipApi):
# nonMelQueryableTypes.add(nodeType)
# break
# else:
# melQueryableTypes.add(nodeType)
#print "num non queryable types:", len(nonMelQueryableTypes)
#
##nodeDict = {}
##for nodeType in realTypes:
## result = cmds.createNode(nodeType)
## nodeDict[nodeType] = result
#
#assert len(nodeDict) == len(realTypes)
#assert len(nonMelQueryableTypes) + len(melQueryableTypes) == len(realTypes)
#assert nonMelQueryableTypes | melQueryableTypes == realTypes
inheritances = {}
badInheritances = {}
goodInheritances = {}
#for nodeType in melQueryableTypes:
for nodeType in realAndAbstract:
try:
inheritance = cmds.nodeType( nodeType, inherited=True, isTypeName=True)
except Exception, e:
print "error caught:"
print e
inheritance = e
inheritances[nodeType] = inheritance
if not inheritance or isinstance(inheritance, Exception):
badInheritances[nodeType] = inheritance
else:
goodInheritances[nodeType] = inheritance
if badInheritances:
print "#" * 60
print "Warning!!!"
print "errors in getting inheritance for following node types:"
for x in badInheritances:
print " ", x
print "#" * 60
#print getApiTypes(mobjDict['polyMoveUVManip'])
discoveredNodes = set()
for nodeType, inheritance in goodInheritances.iteritems():
assert inheritance[-1] == nodeType
for x in inheritance:
if x not in realAndAbstract:
discoveredNodes.add(x)
if discoveredNodes:
print "#" * 60
print "Warning!!!"
print "%s nodes were not in realAndAbstract" % ', '.join(discoveredNodes)
print "#" * 60
allKnownNodes = realAndAbstract | discoveredNodes
def compareTrees(tree1, tree2):
def convertTree(oldTree):
if isinstance(oldTree, dict):
return oldTree
newTree = {}
for key, parents, children in oldTree:
newTree[key] = [parents, set(children)]
return newTree
tree1 = convertTree(tree1)
tree2 = convertTree(tree2)
t1set = set(tree1)
t2set = set(tree2)
both = t1set & t2set
only1 = t1set - both
only2 = t2set - both
diff = {}
for nodeType in both:
n1 = tree1[nodeType]
n2 = tree2[nodeType]
if n1 != n2:
if n1[0] == n2[0]:
parentDiff = 'same'
else:
parentDiff = (n1[0], n2[0])
if n1[1] == n2[1]:
childDiff = 'same'
else:
childDiff = (n1[1] - n2[1], n2[1] - n1[1])
diff[nodeType] = (parentDiff, childDiff)
return only1, only2, diff
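# Summary of compareTrees (for reference): both arguments may be dicts of the form
# {nodeType: [parents, set(children)]} or iterables of (nodeType, parents, children)
# tuples; it returns (types only in tree1, types only in tree2,
# {nodeType: (parentDiff, childDiff)}) for shared types whose entries differ.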
nodeTypeTree = {}
for nodeType in allKnownNodes:
nodeTypeTree[nodeType] = [ [], set() ]
for nodeType, inheritance in goodInheritances.iteritems():
assert inheritance[-1] == nodeType
# len - 1 b/c final item is this nodeType
for i in xrange(len(inheritance) - 1):
parent = inheritance[i]
child = inheritance[i + 1]
# add the child to the parent
nodeTypeTree[parent][1].add(child)
# set the parents for the child
parents = list(reversed(inheritance[:i+1]))
if nodeTypeTree[child][0]:
assert nodeTypeTree[child][0] == parents
else:
nodeTypeTree[child][0] = parents
#eliminate manipulators
nonManipTree = {}
manipulators = set()
for name, data in nodeTypeTree.iteritems():
parents = data[0]
if parents is not None and ('manip3D' in parents or name == 'manip3D'):
manipulators.add(name)
else:
nonManipTree[name] = data
nonManipNonPlugin = {}
for name, data in nonManipTree.iteritems():
parents = data[0]
if parents is not None:
if (any(x.startswith('TH') for x in parents)
or name.startswith('TH')):
continue
nonManipNonPlugin[name] = data
print "trees equal?"
only1, only2, diff = compareTrees(nonManipNonPlugin, factories.nodeHierarchy)
print
print "-" * 60
print "only1:"
pprint(list(only1))
print "-" * 60
print
print
print "-" * 60
print "only2:"
pprint(list(only2))
print "-" * 60
print
print
print "-" * 60
print "diff:"
#pprint(diff)
print "-" * 60
print
#===============================================================================
# api type hierarchy info
#===============================================================================
def getApiTypes(mobj):
apiTypes = []
for apiTypeStr, apiType in factories.apiTypesToApiEnums.iteritems():
if mobj.hasFn(apiType):
apiTypes.append(apiTypeStr)
return apiTypes
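# Illustrative (hypothetical) example: for an MObject wrapping a transform node,
# getApiTypes would typically return a list including 'kBase', 'kDependencyNode',
# 'kDagNode' and 'kTransform', since MObject.hasFn answers True for every api
# type in the node's inheritance chain.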
mayaToApi = {}
mayaToAllApi = {}
unknownTypes = set()
#toCreate = set(nonManipTree)
toCreate = set(realTypes) - manipulators - set(apicache.ApiCache.CRASH_TYPES)
with apicache._GhostObjMaker(toCreate, manipError=False, multi=True) as typeToObj:
for mayaType in toCreate:
obj = typeToObj[mayaType]
if obj :
apiType = obj.apiTypeStr()
mayaToApi[mayaType] = apiType
mayaToAllApi[mayaType] = getApiTypes(obj)
else:
unknownTypes.add(mayaType)
assert mayaToApi.get('containerBase') == 'kContainerBase'
if unknownTypes:
print "#" * 60
print "Warning!!!"
print "could not create the following node types (which SHOULD be createable):"
for x in sorted(unknownTypes):
print " ", x
print "#" * 60
ac = apicache.ApiCache()
ac._buildApiTypesList()
allApiTypes = set(ac.apiTypesToApiEnums)
#===============================================================================
# First attempt at querying hierarchy info, by finding common types in children,
# and filtering types found in parents
#===============================================================================
#missingApiInfo = set(nonManipTree) - set(mayaToApi)
##missingNonAbstract = missingApiInfo - abstractTypes
#
## To try to find what the apiType for a maya type is, first find all api types
## that are shared by all it's children (it should have children, because if we
## can't create the node type, it should be abstract...
#
#sharedByChildren = {}
#noSharedByChildren = set()
#for missingType in missingApiInfo:
# common = None
# for otherType, apiTypes in mayaToAllApi.iteritems():
# if nodeTypeTree[otherType][0] is None:
# print 'type %s had None parent' % otherType
# if missingType in nodeTypeTree[otherType][0]:
# if common is None:
# common = set(apiTypes)
# else:
# common = common & set(apiTypes)
# if common:
# sharedByChildren[missingType] = common
# else:
# noSharedByChildren.add(missingType)
#
## these are strange, because every node should at least all have things like
## kBase, kNamedObject, kDependencyNode...
#if noSharedByChildren:
# print "#" * 60
# print "Warning!!!"
# print "could not find any common api types in children of the following node types:"
# for x in sorted(noSharedByChildren):
# print " ", x
# print "#" * 60
#
## these are api types which are shared by all dependency nodes
#baseApiTypes = set(['kBase', 'kNamedObject', 'kDependencyNode'])
#
#possibilities = {}
## Now, remove api types which are shared by any parents
#for mayaType, possibleApiTypes in sharedByChildren.iteritems():
# filtered = set(possibleApiTypes)
# for parent in nodeTypeTree[mayaType][0]:
# if parent in mayaToApi:
# filtered -= set([mayaToApi[parent]])
# elif parent in sharedByChildren:
# filtered -= sharedByChildren[parent]
# filtered -= baseApiTypes
# possibilities[mayaType] = filtered
#===============================================================================
# Second attempt at querying hierarchy info, by finding common types in children,
# then filtering types found in parents... AND using common ancestry information
#===============================================================================
# build up some information about our hierarchy
def commonAncestor(mayaType1, mayaType2):
if None in (mayaType1, mayaType2):
return None
def reversedParents(mayaType):
# want the parents in order from most generic to most specific - that
# way, we can go from left to right, comparing items until we get to one
# that doesn't match
return list(reversed(nodeTypeTree[mayaType][0])) + [mayaType]
parents1 = reversedParents(mayaType1)
parents2 = reversedParents(mayaType2)
commonAncestor = None
for i in xrange(min(len(parents1), len(parents2))):
if parents1[i] == parents2[i]:
commonAncestor = parents1[i]
else:
break
return commonAncestor
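# Illustrative (hypothetical) example: if nodeTypeTree recorded
#   nodeTypeTree['transform'][0] == ['dagNode', 'entity', 'containerBase', 'node']
#   nodeTypeTree['joint'][0]     == ['transform', 'dagNode', 'entity', 'containerBase', 'node']
# then commonAncestor('joint', 'transform') walks both chains from most generic
# to most specific and returns 'transform'.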
apiTypeToRealMayaTypes = {}
# for a given api type, find the "most specific" maya type that can be an
# ancestor of all maya types that "contain" that api type
apiTypeToCommonMayaAncestor = {}
for mayaType, apiTypes in mayaToAllApi.iteritems():
for apiType in apiTypes:
apiTypeToRealMayaTypes.setdefault(apiType, []).append(mayaType)
if apiType not in apiTypeToCommonMayaAncestor:
apiTypeToCommonMayaAncestor[apiType] = mayaType
else:
apiTypeToCommonMayaAncestor[apiType] = commonAncestor(mayaType,
apiTypeToCommonMayaAncestor[apiType])
# now build the reverse dict - from maya type to a list of all api types that
# it is the common ancestor for
commonMayaAncestorToApiTypes = {}
for apiType, mayaType in apiTypeToCommonMayaAncestor.iteritems():
commonMayaAncestorToApiTypes.setdefault(mayaType, []).append(apiType)
# now, get a list of maya types for which there is only ONE api type that has
# it as its most-specific-common-ancestor...
commonMayaAncestorToSingleApi = {}
for mayaType, apiTypes in commonMayaAncestorToApiTypes.iteritems():
if len(apiTypes) == 1:
commonMayaAncestorToSingleApi[mayaType] = apiTypes[0]
# these are api types which are shared by all dependency nodes
baseApiTypes = set(['kBase', 'kNamedObject', 'kDependencyNode'])
parentDict = dict((mayaType, parents[0])
for mayaType, (parents, children) in nodeTypeTree.iteritems()
if parents)
nodeTreeObj = trees.treeFromDict(parentDict)
#orderedTree = [ (x.value, tuple(y.value for y in x.parents()), tuple(y.value for y in x.childs()) ) \
# for x in nodeTreeObj.preorder() ]
guessedByCommonAncestor = {}
guessedByName = {}
nameAncestorConflicts = {}
guessedByUnique = {}
multiplePossibilities = {}
noUnique = {}
noChildIntersection = set()
childIntersections = {}
childUnions = {}
parentUnions = {}
childPreorders = {}
def nodeToApiName(nodeName):
return 'k' + util.capitalize(nodeName)
def getLowerCaseMapping(names):
uniqueLowerNames = {}
multiLowerNames = {}
for name in names:
lowerType = name.lower()
if lowerType in multiLowerNames:
multiLowerNames[lowerType].append(name)
elif lowerType in uniqueLowerNames:
multiLowerNames[lowerType] = [uniqueLowerNames.pop(lowerType), name]
else:
uniqueLowerNames[lowerType] = name
return uniqueLowerNames, multiLowerNames
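# Hypothetical illustration:
#   getLowerCaseMapping(['polyCube', 'PolyCube', 'mesh'])
# would return
#   ({'mesh': 'mesh'}, {'polycube': ['polyCube', 'PolyCube']})
# i.e. names that collide when lower-cased end up in the "multi" dict and are
# excluded from the unique lookups used below.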
uniqueLowerMaya, multiLowerMaya = getLowerCaseMapping(allKnownNodes)
uniqueLowerApi, multiLowerApi = getLowerCaseMapping(allApiTypes)
if multiLowerMaya:
print "#" * 60
print "Warning!!!"
print "maya node names differed only in case:"
for types in multiLowerMaya.itervalues():
print " %s" % ', '.join(types)
print "#" * 60
if multiLowerApi:
print "#" * 60
print "Warning!!!"
print "api type names differed only in case:"
for types in multiLowerApi.itervalues():
print " %s" % ', '.join(types)
print "#" * 60
# ordered (pattern, replacement) pairs; a dict literal would silently drop the
# entries that reuse the same pattern (duplicate keys), so keep them in a list
modifiers = [
('base', ''),
('abstract', ''),
('node', ''),
('shape', ''),
('mod(?!(ify|ifier))', 'modify'),
('mod(?!(ify|ifier))', 'modifier'),
('modifier', 'mod'),
('modify', 'mod'),
('poly(?!gon)', 'polygon'),
('polygon', 'poly'),
('vert(?!(ex|ice))', 'vertex'),
('vert(?!(ex|ice))', 'vertice'),
('vertice', 'vert'),
('vertex', 'vert'),
('subd(?!iv)', 'subdiv'),
('subd(?!iv)', 'subdivision'),
('subdiv(?!ision)', 'subd'),
('subdiv(?!ision)', 'subdivision'),
('subdivision', 'subd'),
('subdivision', 'subdiv'),
('^th(custom)?', 'plugin'),
]
modifiers = [(re.compile(find), replace)
for find, replace in modifiers]
apiSuffixes = ['', 'node', 'shape', 'shapenode']
def guessApiTypeByName(nodeName, debug=False):
# first, try the easy case...
apiName = nodeToApiName(nodeName)
if apiName in allApiTypes:
if debug:
print 'basic transform worked!'
return apiName
lowerNode = nodeName.lower()
if lowerNode not in uniqueLowerMaya:
if debug:
print 'lower-case node name not unique...'
return None
# now, try with various modifications...
possibleApiNames = set()
possibleModifications = [(find, replace) for find, replace in modifiers
if find.search(lowerNode)]
# find all possible combinations of all possible modifications
for modifyNum in xrange(len(possibleModifications) + 1):
for modifyCombo in itertools.combinations(possibleModifications, modifyNum):
baseName = lowerNode
for find, replace in modifyCombo:
baseName = find.sub(replace, baseName)
if debug:
print [x[0].pattern for x in modifyCombo], baseName
if not baseName:
# if we've eliminated the name with our changes - ie,
# 'shape' would go to '' - then skip
continue
if baseName != lowerNode and (baseName in uniqueLowerMaya
or baseName in multiLowerMaya):
# if after modification, our new name is the name of another
# maya node, skip
continue
apiLower = 'k' + baseName
if apiLower in uniqueLowerApi:
possibleApiNames.add(uniqueLowerApi[apiLower])
else:
for suffix in apiSuffixes:
apiWithSuffix = apiLower + suffix
if apiWithSuffix in uniqueLowerApi:
possibleApiNames.add(uniqueLowerApi[apiWithSuffix])
if debug:
print possibleApiNames
if len(possibleApiNames) == 1:
return list(possibleApiNames)[0]
return None
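# Hedged example of the easy path: for a node named 'lambert', nodeToApiName
# gives 'kLambert'; if that string is a real entry in allApiTypes it is returned
# immediately. Otherwise the modifier patterns above are applied in every
# combination (e.g. stripping 'node'/'shape', swapping 'poly'/'polygon') and the
# result is matched case-insensitively against the known api type names.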
#def guessApiTypeByName(nodeName):
# def isApiType(apiName):
# return isinstance(getattr(api.MFn, apiName, None), int)
#
# for suffix in ('', 'node', 'shape', 'shapenode'):
# if suffix:
# if not nodeName.lower().endswith(suffix):
# continue
# baseName = nodeName[:-len(suffix)]
# else:
# baseName = nodeName
# if not baseName:
# continue
# apiName = nodeToApiName(baseName)
# if isApiType(apiName):
# return apiName
# return None
# now going from bases to leaves,
for currentTree in nodeTreeObj.preorder():
mayaType = currentTree.value
if mayaType is None:
continue
if mayaType in manipulators:
continue
# find nodes for which we don't have an api type yet...
if mayaType in mayaToApi:
assert mayaType in mayaToAllApi, "type %s was in mayaToApi but not mayaToAllApi" % mayaType
continue
uniqueApiTypes = set()
# find intersection of all types shared by all children (for which we have info)
childIntersection = None
childUnion = set()
childPreorder = []
for childTreeNode in currentTree.preorder():
childType = childTreeNode.value
if childType == mayaType:
continue
childPreorder.append(childType)
allChildApiTypes = mayaToAllApi.get(childType)
if allChildApiTypes is not None:
allChildApiTypes = set(allChildApiTypes)
if childIntersection is None:
childIntersection = allChildApiTypes
else:
childIntersection &= allChildApiTypes
childUnion |= allChildApiTypes
if childIntersection:
childIntersections[mayaType] = childIntersection
else:
if childIntersection is None:
childIntersection = set()
noChildIntersection.add(mayaType)
childUnions[mayaType] = childUnion
childPreorders[mayaType] = childPreorder
# find union of parent types
parentUnion = set(baseApiTypes)
for parentTreeNode in currentTree.parents():
parentType = parentTreeNode.value
if parentType is not None:
parentUnion |= set(mayaToAllApi[parentType])
parentUnions[mayaType] = parentUnion
# unique types were found in children, but weren't found in parents
uniqueApiTypes = childIntersection - parentUnion
# information gathering is done... now try to figure out the apiType!
apiType = None
# see if there's exactly one api type that this maya type is the
# most-specific-common-ancestor of...
commonAncestorGuess = commonMayaAncestorToSingleApi.get(mayaType, None)
# ...and see if we can guess by name...
apiNameGuess = guessApiTypeByName(mayaType)
if apiNameGuess:
apiType = apiNameGuess
guessedByName[mayaType] = apiType
if commonAncestorGuess and commonAncestorGuess != apiNameGuess:
nameAncestorConflicts[mayaType] = (apiNameGuess, commonAncestorGuess)
elif commonAncestorGuess:
apiType = commonAncestorGuess
guessedByCommonAncestor[mayaType] = apiType
elif uniqueApiTypes:
# if we did have unique types...
if len(uniqueApiTypes) == 1:
# ...we're golden if there was only 1...
apiType = list(uniqueApiTypes)[0]
guessedByUnique[mayaType] = apiType
else:
# ...but a little stuck if there's more.
multiplePossibilities[mayaType] = uniqueApiTypes
else:
# if name guess failed, and we have no unique ApiTypes, we'll have to
# fall back on using the parent type
parentType = currentTree.parent.value
if parentType is None:
apiType = 'kDependencyNode'
else:
apiType = mayaToApi.get(parentType)
noUnique[mayaType] = apiType
allApi = uniqueApiTypes | parentUnion
if apiType is not None:
allApi |= set([apiType])
mayaToApi[mayaType] = apiType
mayaToAllApi[mayaType] = sorted(allApi)
if nameAncestorConflicts:
print "#" * 60
print "Warning!!!"
print "had conflicting name / common ancestor guess for these maya nodes:"
for mayaType, (nameGuess, ancestorGuess) in nameAncestorConflicts.iteritems():
print " %20s - %20s / %s" % (mayaType, nameGuess, ancestorGuess)
print "#" * 60
if multiplePossibilities:
print "#" * 60
print "Warning!!!"
print "could not find a unique api type for these nodes:"
for mayaType in sorted(multiplePossibilities):
print " %20s - %s" % (mayaType, ', '.join(sorted(multiplePossibilities[mayaType])))
print "#" * 60
|
|
"""
Example of JSON body validation in POST with various kinds of specs and views.
"""
try:
from http import HTTPStatus
except ImportError:
import httplib as HTTPStatus
from flask import Blueprint
from flask import Flask
from flask import jsonify
from flask import request
from flasgger import Schema
from flasgger import Swagger
from flasgger import SwaggerView
from flasgger import fields
from flasgger import swag_from
from flasgger import validate
app = Flask(__name__)
swag = Swagger(app)
test_specs_1 = {
"tags": [
"users"
],
"parameters": [
{
"name": "body",
"in": "body",
"required": True,
"schema": {
"id": "User",
"required": [
"username",
"age"
],
"properties": {
"username": {
"type": "string",
"description": "The user name.",
"default": "Sirius Black"
},
"age": {
"type": "integer",
"description": "The user age (should be integer)",
"default": "180"
},
"tags": {
"type": "array",
"description": "optional list of tags",
"default": [
"wizard",
"hogwarts",
"dead"
],
"items": {
"type": "string"
}
}
}
}
}
],
"responses": {
"200": {
"description": "A single user item",
"schema": {
"$ref": "#/definitions/User"
}
}
}
}
@app.route("/manualvalidation", methods=['POST'])
@swag_from("test_validation.yml")
def manualvalidation():
"""
In this example you need to call validate() manually
passing received data, Definition (schema: id), specs filename
"""
data = request.json
validate(data, 'User', "test_validation.yml")
return jsonify(data)
@app.route("/validateannotation", methods=['POST'])
@swag.validate('User')
@swag_from("test_validation.yml")
def validateannotation():
"""
In this example you use validate(schema_id) annotation on the
method in which you want to validate received data
"""
data = request.json
return jsonify(data)
@app.route("/autovalidation", methods=['POST'])
@swag_from("test_validation.yml", validation=True)
def autovalidation():
"""
Example using auto validation from yaml file.
In this example you don't need to call validate() because
`validation=True` on @swag_from does that for you.
In this case it will use the same provided filename
and will extract the schema from `in: body` definition
and the data will default to `request.json`
or you can specify:
@swag_from('file.yml',
validation=True,
definition='User',
data=lambda: request.json, # any callable
)
"""
data = request.json
return jsonify(data)
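# A minimal usage sketch (illustrative only, names assumed): exercising the
# auto-validated endpoint with Flask's test client. A body matching the User
# schema passes; a body with a string "age" is rejected with 400, which is what
# test_swag() below asserts.
#
#   client = app.test_client()
#   ok = client.post('/autovalidation', json={"username": "Sirius Black", "age": 180})
#   bad = client.post('/autovalidation', json={"username": "Sirius Black", "age": "180"})
#   assert ok.status_code == 200 and bad.status_code == 400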
@app.route("/autovalidationfromspecdict", methods=['POST'])
@swag_from(test_specs_1, validation=True)
def autovalidation_from_spec_dict():
"""
Example using data from dict to validate.
In this example you don't need to call validate() because
`validation=True` on @swag_from does that for you.
In this case it will use the same provided filename
and will extract the schema from `in: body` definition
and the data will default to `request.json`
or you can specify:
@swag_from('file.yml',
validation=True,
definition='User',
data=lambda: request.json, # any callable
)
"""
data = request.json
return jsonify(data)
class User(Schema):
username = fields.Str(required=True, default="Sirius Black")
# wrong default "180" to force validation error
age = fields.Int(required=True, min=18, default="180")
tags = fields.List(fields.Str(), default=["wizard", "hogwarts", "dead"])
class UserPostView(SwaggerView):
tags = ['users']
parameters = User
responses = {
200: {
'description': 'A single user item',
'schema': User
}
}
validation = True
def post(self):
"""
Example using marshmallow Schema
validation=True forces validation of parameters in body
---
# This value overwrites the attributes above
deprecated: true
"""
return jsonify(request.json)
app.add_url_rule(
'/schemevalidation',
view_func=UserPostView.as_view('schemevalidation'),
methods=['POST']
)
# ensure the same works for blueprints
example_blueprint = Blueprint(
"example", __name__, url_prefix='/blueprint')
@example_blueprint.route("/autovalidationfromdocstring", methods=['POST'])
@swag.validate('Officer')
def autovalidation_from_docstring():
"""
Test validation using JsonSchema
The default payload is invalid, try it, then change the age to a
valid integer and try again
---
tags:
- officer
parameters:
- name: body
in: body
required: true
schema:
id: Officer
required:
- name
- age
properties:
name:
type: string
description: The officer's name.
default: "James T. Kirk"
age:
type: integer
description: The officer's age (should be integer)
default: "138"
tags:
type: array
description: optional list of tags
default: ["starfleet", "captain", "enterprise", "dead"]
items:
type: string
responses:
200:
description: A single officer item
schema:
$ref: '#/definitions/Officer'
"""
data = request.json
return jsonify(data)
@example_blueprint.route('/manualvalidation', methods=['POST'])
@swag_from("test_validation.yml")
def manualvalidation_bp():
"""
In this example you need to call validate() manually
passing received data, Definition (schema: id), specs filename
"""
data = request.json
validate(data, 'User', "test_validation.yml")
return jsonify(data)
@example_blueprint.route('/autovalidation', methods=['POST'])
@swag_from("test_validation.yml", validation=True)
def autovalidation_bp():
"""
Example using auto validation from yaml file.
In this example you don't need to call validate() because
`validation=True` on @swag_from does that for you.
In this case it will use the same provided filename
and will extract the schema from `in: body` definition
and the data will default to `request.json`
or you can specify:
@swag_from('file.yml',
validation=True,
definition='User',
data=lambda: request.json, # any callable
)
"""
data = request.json
return jsonify(data)
@example_blueprint.route("/autovalidationfromspecdict", methods=['POST'])
@swag_from(test_specs_1, validation=True)
def autovalidation_from_spec_dict_bp():
"""
Example using data from dict to validate.
In this example you don't need to call validate() because
`validation=True` on @swag_from does that for you.
In this case it will use the same provided filename
and will extract the schema from `in: body` definition
and the data will default to `request.json`
or you can specify:
@swag_from('file.yml',
validation=True,
definition='User',
data=lambda: request.json, # any callable
)
"""
data = request.json
return jsonify(data)
class BPUserPostView(SwaggerView):
tags = ['users']
parameters = User
responses = {
200: {
'description': 'A single user item',
'schema': User
}
}
validation = True
def post(self):
"""
Example using marshmallow Schema
validation=True forces validation of parameters in body
---
# This value overwrites the attributes above
deprecated: true
"""
return jsonify(request.json)
example_blueprint.add_url_rule(
'/schemevalidation',
view_func=BPUserPostView.as_view('schemevalidation'),
methods=['POST']
)
app.register_blueprint(example_blueprint)
def test_swag(client, specs_data):
"""
This test runs automatically in Travis CI
:param client: Flask app test client
:param specs_data: {'url': {swag_specs}} for every spec in app
"""
apispec = specs_data.get('/apispec_1.json')
assert apispec is not None
paths = apispec.get('paths')
expected_user_paths = (
'/autovalidation',
'/validateannotation',
'/autovalidationfromspecdict',
'/blueprint/autovalidation',
'/blueprint/autovalidationfromspecdict',
'/blueprint/manualvalidation',
'/blueprint/schemevalidation',
'/manualvalidation',
'/schemevalidation',
)
expected_officer_paths = (
'/blueprint/autovalidationfromdocstring',
)
invalid_users = (
"""
{
"username": "Sirius Black",
"age": "180",
"tags": [
"wizard",
"hogwarts",
"dead"
]
}
""",
"""
{
"age": 180,
"tags": [
"wizard"
]
}
""",
)
valid_users = (
"""
{
"username": "Sirius Black",
"age": 180,
"tags": [
"wizard",
"hogwarts",
"dead"
]
}
""",
"""
{
"username": "Ronald Weasley",
"age": 22
}
""",
)
invalid_officers = (
"""
{
"name": "James T. Kirk",
"age": "138",
"tags": [
"captain",
"enterprise",
"dead"
]
}
""",
"""
{
"age": 138,
"tags": [
"captain"
]
}
""",
)
valid_officers = (
"""
{
"name": "James T. Kirk",
"age": 138,
"tags": [
"captain",
"enterprise",
"dead"
]
}
""",
"""
{
"name": "Jean-Luc Picard",
"age": 60
}
""",
)
assert paths is not None and len(paths) > 0
definitions = apispec.get('definitions')
assert definitions is not None
assert definitions.get('User') is not None
assert definitions.get('Officer') is not None
for expected_path in expected_user_paths:
assert paths.get(expected_path) is not None
for invalid_user in invalid_users:
response = client.post(
expected_path, data=invalid_user,
content_type='application/json')
assert response.status_code == HTTPStatus.BAD_REQUEST
for valid_user in valid_users:
response = client.post(
expected_path, data=valid_user,
content_type='application/json')
assert response.status_code == HTTPStatus.OK
for expected_path in expected_officer_paths:
assert paths.get(expected_path) is not None
for invalid_officer in invalid_officers:
response = client.post(
expected_path, data=invalid_officer,
content_type='application/json')
assert response.status_code == HTTPStatus.BAD_REQUEST
for valid_officer in valid_officers:
response = client.post(
expected_path, data=valid_officer,
content_type='application/json')
assert response.status_code == HTTPStatus.OK
if __name__ == "__main__":
app.run(debug=True)
|
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase, RequestFactory
try:
# Django >= 1.7
from django.test import override_settings
except ImportError:
# Django <= 1.6
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.timezone import utc, localtime
from django.utils import timezone
import pytz
import json
from notifications import notify
from notifications.models import Notification
from notifications.utils import id2slug
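# The pattern exercised throughout these tests (sketch):
#   notify.send(sender, recipient=user_or_group, verb='commented', action_object=obj)
# creates Notification rows; the manager helpers used below (unread(), read(),
# mark_all_as_read(), mark_all_as_unread(), ...) then operate on those rows.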
class NotificationTest(TestCase):
@override_settings(USE_TZ=True)
@override_settings(TIME_ZONE='Asia/Shanghai')
def test_use_timezone(self):
from_user = User.objects.create(username="from", password="pwd", email="[email protected]")
to_user = User.objects.create(username="to", password="pwd", email="[email protected]")
notify.send(from_user, recipient=to_user, verb='commented', action_object=from_user)
notification = Notification.objects.get(recipient=to_user)
delta = timezone.now().replace(tzinfo=utc) - localtime(notification.timestamp, pytz.timezone(settings.TIME_ZONE))
self.assertTrue(delta.seconds < 60)
# The delta between the two events will still be less than a second despite the different timezones
# The call to now() and the call that created the notification happen moments apart, so the delta is a few seconds, not the 8 hours an earlier version of this test assumed.
@override_settings(USE_TZ=False)
@override_settings(TIME_ZONE='Asia/Shanghai')
def test_disable_timezone(self):
from_user = User.objects.create(username="from2", password="pwd", email="[email protected]")
to_user = User.objects.create(username="to2", password="pwd", email="[email protected]")
notify.send(from_user, recipient=to_user, verb='commented', action_object=from_user)
notification = Notification.objects.get(recipient=to_user)
delta = timezone.now() - notification.timestamp
self.assertTrue(delta.seconds < 60)
class NotificationManagersTest(TestCase):
def setUp(self):
self.message_count = 10
self.from_user = User.objects.create(username="from2", password="pwd", email="[email protected]")
self.to_user = User.objects.create(username="to2", password="pwd", email="[email protected]")
self.to_group = Group.objects.create(name="to2_g")
self.to_group.user_set.add(self.to_user)
for i in range(self.message_count):
notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
# Send notification to group
notify.send(self.from_user, recipient=self.to_group, verb='commented', action_object=self.from_user)
self.message_count += 1
def test_unread_manager(self):
self.assertEqual(Notification.objects.unread().count(), self.message_count)
n = Notification.objects.filter(recipient=self.to_user).first()
n.mark_as_read()
self.assertEqual(Notification.objects.unread().count(), self.message_count-1)
for n in Notification.objects.unread():
self.assertTrue(n.unread)
def test_read_manager(self):
self.assertEqual(Notification.objects.unread().count(), self.message_count)
n = Notification.objects.filter(recipient=self.to_user).first()
n.mark_as_read()
self.assertEqual(Notification.objects.read().count(), 1)
for n in Notification.objects.read():
self.assertFalse(n.unread)
def test_mark_all_as_read_manager(self):
self.assertEqual(Notification.objects.unread().count(), self.message_count)
Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
self.assertEqual(Notification.objects.unread().count(), 0)
def test_mark_all_as_unread_manager(self):
self.assertEqual(Notification.objects.unread().count(), self.message_count)
Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
self.assertEqual(Notification.objects.unread().count(), 0)
Notification.objects.filter(recipient=self.to_user).mark_all_as_unread()
self.assertEqual(Notification.objects.unread().count(), self.message_count)
def test_mark_all_deleted_manager_without_soft_delete(self):
self.assertRaises(ImproperlyConfigured, Notification.objects.active)
self.assertRaises(ImproperlyConfigured, Notification.objects.active)
self.assertRaises(ImproperlyConfigured, Notification.objects.mark_all_as_deleted)
self.assertRaises(ImproperlyConfigured, Notification.objects.mark_all_as_active)
@override_settings(NOTIFICATIONS_SOFT_DELETE=True)
def test_mark_all_deleted_manager(self):
n = Notification.objects.filter(recipient=self.to_user).first()
n.mark_as_read()
self.assertEqual(Notification.objects.read().count(), 1)
self.assertEqual(Notification.objects.unread().count(), self.message_count-1)
self.assertEqual(Notification.objects.active().count(), self.message_count)
self.assertEqual(Notification.objects.deleted().count(), 0)
Notification.objects.mark_all_as_deleted()
self.assertEqual(Notification.objects.read().count(), 0)
self.assertEqual(Notification.objects.unread().count(), 0)
self.assertEqual(Notification.objects.active().count(), 0)
self.assertEqual(Notification.objects.deleted().count(), self.message_count)
Notification.objects.mark_all_as_active()
self.assertEqual(Notification.objects.read().count(), 1)
self.assertEqual(Notification.objects.unread().count(), self.message_count-1)
self.assertEqual(Notification.objects.active().count(), self.message_count)
self.assertEqual(Notification.objects.deleted().count(), 0)
class NotificationTestPages(TestCase):
def setUp(self):
self.message_count = 10
self.from_user = User.objects.create_user(username="from", password="pwd", email="[email protected]")
self.to_user = User.objects.create_user(username="to", password="pwd", email="[email protected]")
self.to_user.is_staff = True
self.to_user.save()
for i in range(self.message_count):
notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
def logout(self):
self.client.post(reverse('admin:logout')+'?next=/', {})
def login(self, username, password):
self.logout()
response = self.client.post(reverse('login'), {'username': username, 'password': password})
self.assertEqual(response.status_code, 302)
return response
def test_all_messages_page(self):
self.login('to', 'pwd')
response = self.client.get(reverse('notifications:all'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.all()))
def test_unread_messages_pages(self):
self.login('to', 'pwd')
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
self.assertEqual(len(response.context['notifications']), self.message_count)
for i, n in enumerate(self.to_user.notifications.all()):
if i % 3 == 0:
response = self.client.get(reverse('notifications:mark_as_read', args=[id2slug(n.id)]))
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
self.assertTrue(len(response.context['notifications']) < self.message_count)
response = self.client.get(reverse('notifications:mark_all_as_read'))
self.assertRedirects(response, reverse('notifications:all'))
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
self.assertEqual(len(response.context['notifications']), 0)
def test_next_pages(self):
self.login('to', 'pwd')
response = self.client.get(reverse('notifications:mark_all_as_read')+"?next="+reverse('notifications:unread'))
self.assertRedirects(response, reverse('notifications:unread'))
slug = id2slug(self.to_user.notifications.first().id)
response = self.client.get(reverse('notifications:mark_as_read', args=[slug])+"?next="+reverse('notifications:unread'))
self.assertRedirects(response, reverse('notifications:unread'))
slug = id2slug(self.to_user.notifications.first().id)
response = self.client.get(reverse('notifications:mark_as_unread', args=[slug])+"?next="+reverse('notifications:unread'))
self.assertRedirects(response, reverse('notifications:unread'))
def test_delete_messages_pages(self):
self.login('to', 'pwd')
slug = id2slug(self.to_user.notifications.first().id)
response = self.client.get(reverse('notifications:delete', args=[slug]))
self.assertRedirects(response, reverse('notifications:all'))
response = self.client.get(reverse('notifications:all'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.all()))
self.assertEqual(len(response.context['notifications']), self.message_count-1)
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
self.assertEqual(len(response.context['notifications']), self.message_count-1)
@override_settings(NOTIFICATIONS_SOFT_DELETE=True)
def test_soft_delete_messages_manager(self):
self.login('to', 'pwd')
slug = id2slug(self.to_user.notifications.first().id)
response = self.client.get(reverse('notifications:delete', args=[slug]))
self.assertRedirects(response, reverse('notifications:all'))
response = self.client.get(reverse('notifications:all'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.active()))
self.assertEqual(len(response.context['notifications']), self.message_count-1)
response = self.client.get(reverse('notifications:unread'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
self.assertEqual(len(response.context['notifications']), self.message_count-1)
def test_unread_count_api(self):
self.login('to', 'pwd')
response = self.client.get(reverse('notifications:live_unread_notification_count'))
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(list(data.keys()), ['unread_count'])
self.assertEqual(data['unread_count'], 10)
Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
response = self.client.get(reverse('notifications:live_unread_notification_count'))
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(list(data.keys()), ['unread_count'])
self.assertEqual(data['unread_count'], 0)
notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
response = self.client.get(reverse('notifications:live_unread_notification_count'))
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(list(data.keys()), ['unread_count'])
self.assertEqual(data['unread_count'], 1)
def test_unread_list_api(self):
self.login('to', 'pwd')
response = self.client.get(reverse('notifications:live_unread_notification_list'))
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
self.assertEqual(data['unread_count'], 10)
self.assertEqual(len(data['unread_list']), 5)
response = self.client.get(reverse('notifications:live_unread_notification_list')+"?max=12")
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
self.assertEqual(data['unread_count'], 10)
self.assertEqual(len(data['unread_list']), 10)
# Test with a bad 'max' value
response = self.client.get(reverse('notifications:live_unread_notification_list')+"?max=this_is_wrong")
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
self.assertEqual(data['unread_count'], 10)
self.assertEqual(len(data['unread_list']), 5)
Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
response = self.client.get(reverse('notifications:live_unread_notification_list'))
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
self.assertEqual(data['unread_count'], 0)
self.assertEqual(len(data['unread_list']), 0)
notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
response = self.client.get(reverse('notifications:live_unread_notification_list'))
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
self.assertEqual(data['unread_count'], 1)
self.assertEqual(len(data['unread_list']), 1)
self.assertEqual(data['unread_list'][0]['verb'], 'commented')
def test_live_update_tags(self):
from django.shortcuts import render
self.login('to', 'pwd')
self.factory = RequestFactory()
request = self.factory.get('/notification/live_updater')
request.user = self.to_user
page = render(request, 'notifications/test_tags.html', {'request': request})
#TODO: Add more tests to check what is being output.
|
|
# TNC Python interface
# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $
# Copyright (c) 2004-2005, Jean-Sebastien Roy ([email protected])
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
TNC: A Python interface to the TNC non-linear optimizer
TNC is a non-linear optimizer. To use it, you must provide a function to
minimize. The function must take one argument: the list of coordinates at which
to evaluate the function; and it must return either a tuple, whose first element
is the value of the function and whose second element is the gradient of the
function (as a list of values); or None, to abort the minimization.
"""
from scipy.optimize import moduleTNC
from .optimize import (MemoizeJac, OptimizeResult, _check_unknown_options,
_prepare_scalar_function)
from ._constraints import old_bound_to_new
from numpy import inf, array, zeros, asfarray
__all__ = ['fmin_tnc']
MSG_NONE = 0 # No messages
MSG_ITER = 1 # One line per iteration
MSG_INFO = 2 # Informational messages
MSG_VERS = 4 # Version info
MSG_EXIT = 8 # Exit reasons
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT
MSGS = {
MSG_NONE: "No messages",
MSG_ITER: "One line per iteration",
MSG_INFO: "Informational messages",
MSG_VERS: "Version info",
MSG_EXIT: "Exit reasons",
MSG_ALL: "All messages"
}
INFEASIBLE = -1 # Infeasible (lower bound > upper bound)
LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0)
FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0)
XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0)
MAXFUN = 3 # Max. number of function evaluations reached
LSFAIL = 4 # Linear search failed
CONSTANT = 5 # All lower bounds are equal to the upper bounds
NOPROGRESS = 6 # Unable to progress
USERABORT = 7 # User requested end of minimization
RCSTRINGS = {
INFEASIBLE: "Infeasible (lower bound > upper bound)",
LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)",
FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
MAXFUN: "Max. number of function evaluations reached",
LSFAIL: "Linear search failed",
CONSTANT: "All lower bounds are equal to the upper bounds",
NOPROGRESS: "Unable to progress",
USERABORT: "User requested end of minimization"
}
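# For example, a run that stops because successive function values agree
# (|f_n-f_(n-1)| ~= 0) returns rc == FCONVERGED (1), and RCSTRINGS[rc] gives
# the human-readable reason, as printed by example() at the bottom of this file.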
# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
# SciPy
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
bounds=None, epsilon=1e-8, scale=None, offset=None,
messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
rescale=-1, disp=None, callback=None):
"""
Minimize a function with variables subject to bounds, using
gradient information in a truncated Newton algorithm. This
method wraps a C implementation of the algorithm.
Parameters
----------
func : callable ``func(x, *args)``
Function to minimize. Must do one of:
1. Return f and g, where f is the value of the function and g its
gradient (a list of floats).
2. Return the function value but supply gradient function
separately as `fprime`.
3. Return the function value and set ``approx_grad=True``.
If the function returns None, the minimization
is aborted.
x0 : array_like
Initial estimate of minimum.
fprime : callable ``fprime(x, *args)``, optional
Gradient of `func`. If None, then either `func` must return the
function value and the gradient (``f,g = func(x, *args)``)
or `approx_grad` must be True.
args : tuple, optional
Arguments to pass to function.
approx_grad : bool, optional
If true, approximate the gradient numerically.
bounds : list, optional
(min, max) pairs for each element in x0, defining the
bounds on that parameter. Use None or +/-inf for one of
min or max when there is no bound in that direction.
epsilon : float, optional
Used if approx_grad is True. The stepsize in a finite
difference approximation for fprime.
scale : array_like, optional
Scaling factors to apply to each variable. If None, the
factors are up-low for interval bounded variables and
1+|x| for the others. Defaults to None.
offset : array_like, optional
Value to subtract from each variable. If None, the
offsets are (up+low)/2 for interval bounded variables
and x for the others.
messages : int, optional
Bit mask used to select messages display during
minimization values defined in the MSGS dict. Defaults to
MGS_ALL.
disp : int, optional
Integer interface to messages. 0 = no message, 5 = all messages
maxCGit : int, optional
Maximum number of hessian*vector evaluations per main
iteration. If maxCGit == 0, the direction chosen is
-gradient. If maxCGit < 0, maxCGit is set to
max(1,min(50,n/2)). Defaults to -1.
maxfun : int, optional
Maximum number of function evaluations. If None, maxfun is
set to max(100, 10*len(x0)). Defaults to None.
eta : float, optional
Severity of the line search. If < 0 or > 1, set to 0.25.
Defaults to -1.
stepmx : float, optional
Maximum step for the line search. May be increased during
call. If too small, it will be set to 10.0. Defaults to 0.
accuracy : float, optional
Relative precision for finite difference calculations. If
<= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
fmin : float, optional
Minimum function value estimate. Defaults to 0.
ftol : float, optional
Precision goal for the value of f in the stopping criterion.
If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
xtol : float, optional
Precision goal for the value of x in the stopping
criterion (after applying x scaling factors). If xtol <
0.0, xtol is set to sqrt(machine_precision). Defaults to
-1.
pgtol : float, optional
Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
rescale : float, optional
Scaling factor (in log10) used to trigger f value
rescaling. If 0, rescale at each iteration. If a large
value, never rescale. If < 0, rescale is set to 1.3.
callback : callable, optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
Returns
-------
x : ndarray
The solution.
nfeval : int
The number of function evaluations.
rc : int
Return code, see below
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'TNC' `method` in particular.
Notes
-----
The underlying algorithm is truncated Newton, also called
Newton Conjugate-Gradient. This method differs from
scipy.optimize.fmin_ncg in that
1. it wraps a C implementation of the algorithm
2. it allows each variable to be given an upper and lower bound.
The algorithm incorporates the bound constraints by determining
the descent direction as in an unconstrained truncated Newton,
but never taking a step-size large enough to leave the space
of feasible x's. The algorithm keeps track of a set of
currently active constraints, and ignores them when computing
the minimum allowable step size. (The x's associated with the
active constraint are kept fixed.) If the maximum allowable
step size is zero then a new constraint is added. At the end
of each iteration one of the constraints may be deemed no
longer active and removed. A constraint is considered
no longer active if it is currently active
but the gradient for that variable points inward from the
constraint. The specific constraint removed is the one
associated with the variable of largest index whose
constraint is no longer active.
Return codes are defined as follows::
-1 : Infeasible (lower bound > upper bound)
0 : Local minimum reached (|pg| ~= 0)
1 : Converged (|f_n-f_(n-1)| ~= 0)
2 : Converged (|x_n-x_(n-1)| ~= 0)
3 : Max. number of function evaluations reached
4 : Linear search failed
5 : All lower bounds are equal to the upper bounds
6 : Unable to progress
7 : User requested end of minimization
References
----------
Wright S., Nocedal J. (2006), 'Numerical Optimization'
Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
SIAM Journal of Numerical Analysis 21, pp. 770-778
"""
# handle fprime/approx_grad
if approx_grad:
fun = func
jac = None
elif fprime is None:
fun = MemoizeJac(func)
jac = fun.derivative
else:
fun = func
jac = fprime
if disp is not None: # disp takes precedence over messages
mesg_num = disp
else:
mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL)
# build options
opts = {'eps': epsilon,
'scale': scale,
'offset': offset,
'mesg_num': mesg_num,
'maxCGit': maxCGit,
'maxiter': maxfun,
'eta': eta,
'stepmx': stepmx,
'accuracy': accuracy,
'minfev': fmin,
'ftol': ftol,
'xtol': xtol,
'gtol': pgtol,
'rescale': rescale,
'disp': False}
res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)
return res['x'], res['nfev'], res['status']
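# Rough equivalent through the unified interface (sketch; see the 'TNC' method
# of scipy.optimize.minimize):
#   res = minimize(func, x0, args=args, jac=True, bounds=bounds, method='TNC')
#   x, nfev, rc = res.x, res.nfev, res.status
# assuming func returns (f, g) so that jac=True is appropriate.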
def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
eps=1e-8, scale=None, offset=None, mesg_num=None,
maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
callback=None, finite_diff_rel_step=None, **unknown_options):
"""
Minimize a scalar function of one or more variables using a truncated
Newton (TNC) algorithm.
Options
-------
eps : float or ndarray
If `jac is None` the absolute step size used for numerical
approximation of the jacobian via forward differences.
scale : list of floats
Scaling factors to apply to each variable. If None, the
factors are up-low for interval bounded variables and
1+|x| for the others. Defaults to None.
offset : float
Value to subtract from each variable. If None, the
offsets are (up+low)/2 for interval bounded variables
and x for the others.
disp : bool
Set to True to print convergence messages.
maxCGit : int
Maximum number of hessian*vector evaluations per main
iteration. If maxCGit == 0, the direction chosen is
-gradient. If maxCGit < 0, maxCGit is set to
max(1,min(50,n/2)). Defaults to -1.
maxiter : int
Maximum number of function evaluations. If None, `maxiter` is
set to max(100, 10*len(x0)). Defaults to None.
eta : float
Severity of the line search. If < 0 or > 1, set to 0.25.
Defaults to -1.
stepmx : float
Maximum step for the line search. May be increased during
call. If too small, it will be set to 10.0. Defaults to 0.
accuracy : float
Relative precision for finite difference calculations. If
<= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
minfev : float
Minimum function value estimate. Defaults to 0.
ftol : float
Precision goal for the value of f in the stopping criterion.
If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
xtol : float
Precision goal for the value of x in the stopping
criterion (after applying x scaling factors). If xtol <
0.0, xtol is set to sqrt(machine_precision). Defaults to
-1.
gtol : float
Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
rescale : float
Scaling factor (in log10) used to trigger f value
rescaling. If 0, rescale at each iteration. If a large
value, never rescale. If < 0, rescale is set to 1.3.
finite_diff_rel_step : None or array_like, optional
If `jac in ['2-point', '3-point', 'cs']` the relative step size to
use for numerical approximation of the jacobian. The absolute step
size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
possibly adjusted to fit into the bounds. For ``method='3-point'``
the sign of `h` is ignored. If None (default) then step is selected
automatically.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
fmin = minfev
pgtol = gtol
x0 = asfarray(x0).flatten()
n = len(x0)
if bounds is None:
bounds = [(None,None)] * n
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
new_bounds = old_bound_to_new(bounds)
if mesg_num is not None:
messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
elif disp:
messages = MSG_ALL
else:
messages = MSG_NONE
sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
finite_diff_rel_step=finite_diff_rel_step,
bounds=new_bounds)
func_and_grad = sf.fun_and_grad
"""
low, up : the bounds (lists of floats)
if low is None, the lower bounds are removed.
if up is None, the upper bounds are removed.
low and up defaults to None
"""
low = zeros(n)
up = zeros(n)
for i in range(n):
if bounds[i] is None:
l, u = -inf, inf
else:
l,u = bounds[i]
if l is None:
low[i] = -inf
else:
low[i] = l
if u is None:
up[i] = inf
else:
up[i] = u
if scale is None:
scale = array([])
if offset is None:
offset = array([])
if maxfun is None:
maxfun = max(100, 10*len(x0))
rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale,
offset, messages, maxCGit, maxfun,
eta, stepmx, accuracy, fmin, ftol,
xtol, pgtol, rescale, callback)
funv, jacv = func_and_grad(x)
return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev,
nit=nit, status=rc, message=RCSTRINGS[rc],
success=(-1 < rc < 3))
if __name__ == '__main__':
# Examples for TNC
def example():
print("Example")
# A function to minimize
def function(x):
f = pow(x[0],2.0)+pow(abs(x[1]),3.0)
g = [0,0]
g[0] = 2.0*x[0]
g[1] = 3.0*pow(abs(x[1]),2.0)
if x[1] < 0:
g[1] = -g[1]
return f, g
# Optimizer call
x, nf, rc = fmin_tnc(function, [-7, 3], bounds=([-10, 1], [10, 10]))
print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc])
print("x =", x)
print("exact value = [0, 1]")
print()
example()
|
|
# Copyright (c) 2009-2015 Denis Bilenko. See LICENSE for details.
"""
Event-loop hub.
"""
from __future__ import absolute_import
# XXX: FIXME: Refactor to make this smaller
# pylint:disable=too-many-lines
from functools import partial as _functools_partial
import os
import sys
import traceback
from greenlet import greenlet as RawGreenlet, getcurrent, GreenletExit
__all__ = [
'getcurrent',
'GreenletExit',
'spawn_raw',
'sleep',
'kill',
'signal',
'reinit',
'get_hub',
'Hub',
'Waiter',
]
from gevent._compat import string_types
from gevent._compat import xrange
from gevent._util import _NONE
from gevent._util import readproperty
if sys.version_info[0] <= 2:
import thread # pylint:disable=import-error
else:
import _thread as thread # python 3 pylint:disable=import-error
# These must be the "real" native thread versions,
# not monkey-patched.
threadlocal = thread._local
class _threadlocal(threadlocal):
def __init__(self):
# Use a class with an initializer so that we can test
# for 'is None' instead of catching AttributeError, making
# the code cleaner and possibly solving some corner cases
# (like #687)
threadlocal.__init__(self)
self.Hub = None
self.loop = None
self.hub = None
_threadlocal = _threadlocal()
get_ident = thread.get_ident
MAIN_THREAD = get_ident()
class LoopExit(Exception):
"""
Exception thrown when the hub finishes running.
In a normal application, this is never thrown or caught
explicitly. The internal implementation of functions like
:func:`join` and :func:`joinall` may catch it, but user code
generally should not.
.. caution::
Errors in application programming can also lead to this exception being
raised. Some examples include (but are not limited to):
- greenlets deadlocking on a lock;
- using a socket or other gevent object with native thread
affinity from a different thread
"""
pass
class BlockingSwitchOutError(AssertionError):
pass
class InvalidSwitchError(AssertionError):
pass
class ConcurrentObjectUseError(AssertionError):
# raised when an object is used (waited on) by two greenlets
# independently, meaning the object was entered into a blocking
# state by one greenlet and then another while still blocking in the
# first one
pass
def spawn_raw(function, *args, **kwargs):
"""
Create a new :class:`greenlet.greenlet` object and schedule it to
run ``function(*args, **kwargs)``.
This returns a raw :class:`~greenlet.greenlet` which does not have all the useful
methods that :class:`gevent.Greenlet` has. Typically, applications
should prefer :func:`~gevent.spawn`, but this method may
occasionally be useful as an optimization if there are many
greenlets involved.
.. versionchanged:: 1.1b1
If *function* is not callable, immediately raise a :exc:`TypeError`
instead of spawning a greenlet that will raise an uncaught TypeError.
.. versionchanged:: 1.1rc2
Accept keyword arguments for ``function`` as previously (incorrectly)
documented. Note that this may incur an additional expense.
.. versionchanged:: 1.1a3
Verify that ``function`` is callable, raising a TypeError if not. Previously,
the spawned greenlet would have failed the first time it was switched to.
"""
if not callable(function):
raise TypeError("function must be callable")
hub = get_hub()
# The callback class object that we use to run this doesn't
# accept kwargs (and those objects are heavily used, as well as being
# implemented twice in core.ppyx and corecffi.py) so do it with a partial
if kwargs:
function = _functools_partial(function, *args, **kwargs)
g = RawGreenlet(function, hub)
hub.loop.run_callback(g.switch)
else:
g = RawGreenlet(function, hub)
hub.loop.run_callback(g.switch, *args)
return g
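# Minimal usage sketch (illustrative; callable name assumed): schedule a plain
# callable on the hub's loop and get back a raw greenlet with none of
# gevent.Greenlet's helpers:
#   def _work(msg):
#       ...
#   g = spawn_raw(_work, "hello")
# The callable runs on a subsequent iteration of the event loop.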
def sleep(seconds=0, ref=True):
"""
Put the current greenlet to sleep for at least *seconds*.
*seconds* may be specified as an integer, or a float if fractional
seconds are desired.
.. tip:: In the current implementation, a value of 0 (the default)
means to yield execution to any other runnable greenlets, but
this greenlet may be scheduled again before the event loop
cycles (in an extreme case, a greenlet that repeatedly sleeps
with 0 can prevent greenlets that are ready to do I/O from
being scheduled for some (small) period of time); a value greater than
0, on the other hand, will delay running this greenlet until
the next iteration of the loop.
If *ref* is False, the greenlet running ``sleep()`` will not prevent :func:`gevent.wait`
from exiting.
.. seealso:: :func:`idle`
"""
hub = get_hub()
loop = hub.loop
if seconds <= 0:
waiter = Waiter()
loop.run_callback(waiter.switch)
waiter.get()
else:
hub.wait(loop.timer(seconds, ref=ref))
def idle(priority=0):
"""
Cause the calling greenlet to wait until the event loop is idle.
Idle is defined as having no other events of the same or higher
*priority* pending. That is, as long as sockets, timeouts or even
signals of the same or higher priority are being processed, the loop
is not idle.
.. seealso:: :func:`sleep`
"""
hub = get_hub()
watcher = hub.loop.idle()
if priority:
watcher.priority = priority
hub.wait(watcher)
def kill(greenlet, exception=GreenletExit):
"""
Kill greenlet asynchronously. The current greenlet is not unscheduled.
.. note::
The method :meth:`Greenlet.kill` method does the same and
more (and the same caveats listed there apply here). However, the MAIN
greenlet - the one that exists initially - does not have a
``kill()`` method, and neither do any created with :func:`spawn_raw`,
so you have to use this function.
.. caution:: Use care when killing greenlets. If they are not prepared for
exceptions, this could result in corrupted state.
.. versionchanged:: 1.1a2
If the ``greenlet`` has a :meth:`kill <Greenlet.kill>` method, calls it. This prevents a
greenlet from being switched to for the first time after it's been
killed but not yet executed.
"""
if not greenlet.dead:
if hasattr(greenlet, 'kill'):
# dealing with gevent.greenlet.Greenlet. Use it, especially
# to avoid allowing one to be switched to for the first time
# after it's been killed
greenlet.kill(exception=exception, block=False)
else:
get_hub().loop.run_callback(greenlet.throw, exception)
class signal(object):
"""
Call the *handler* with the *args* and *kwargs* when the process
receives the signal *signalnum*.
The *handler* will be run in a new greenlet when the signal is delivered.
This returns an object with the useful method ``cancel``, which, when called,
will prevent future deliveries of *signalnum* from calling *handler*.
.. note::
This may not operate correctly with SIGCHLD if libev child watchers
are used (as they are by default with os.fork).
.. versionchanged:: 1.2a1
The ``handler`` argument is required to be callable at construction time.
"""
# XXX: This is manually documented in gevent.rst while it is aliased in
# the gevent module.
greenlet_class = None
def __init__(self, signalnum, handler, *args, **kwargs):
if not callable(handler):
raise TypeError("signal handler must be callable.")
self.hub = get_hub()
self.watcher = self.hub.loop.signal(signalnum, ref=False)
self.watcher.start(self._start)
self.handler = handler
self.args = args
self.kwargs = kwargs
if self.greenlet_class is None:
from gevent import Greenlet
self.greenlet_class = Greenlet
def _get_ref(self):
return self.watcher.ref
def _set_ref(self, value):
self.watcher.ref = value
ref = property(_get_ref, _set_ref)
del _get_ref, _set_ref
def cancel(self):
self.watcher.stop()
def _start(self):
try:
greenlet = self.greenlet_class(self.handle)
greenlet.switch()
except: # pylint:disable=bare-except
self.hub.handle_error(None, *sys.exc_info())
def handle(self):
try:
self.handler(*self.args, **self.kwargs)
except: # pylint:disable=bare-except
self.hub.handle_error(None, *sys.exc_info())
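# Hedged usage sketch for the signal helper above (POSIX only; the handler
# runs in a fresh greenlet each time the signal is delivered):
#
#     import signal as signalmodule
#     import gevent
#
#     def on_hup():
#         print('got SIGHUP')
#
#     sig = gevent.hub.signal(signalmodule.SIGHUP, on_hup)
#     gevent.sleep(60)    # signals delivered during this time run on_hup
#     sig.cancel()        # stop listening for SIGHUP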
def reinit():
"""
Prepare the gevent hub to run in a new (forked) process.
This should be called *immediately* after :func:`os.fork` in the
child process. This is done automatically by
:func:`gevent.os.fork` or if the :mod:`os` module has been
monkey-patched. If this function is not called in a forked
process, symptoms may include hanging of functions like
:func:`socket.getaddrinfo`, and the hub's threadpool is unlikely
to work.
.. note:: Registered fork watchers may or may not run before
this function (and thus ``gevent.os.fork``) return. If they have
not run, they will run "soon", after an iteration of the event loop.
You can force this by inserting a few small (but non-zero) calls to :func:`sleep`
after fork returns. (As of gevent 1.1 and before, fork watchers will
not have run, but this may change in the future.)
.. note:: This function may be removed in a future major release
if the fork process can be more smoothly managed.
.. warning:: See remarks in :func:`gevent.os.fork` about greenlets
and libev watchers in the child process.
"""
# The loop reinit function in turn calls libev's ev_loop_fork
# function.
hub = _get_hub()
if hub is not None:
# Note that we reinit the existing loop, not destroy it.
# See https://github.com/gevent/gevent/issues/200.
hub.loop.reinit()
# libev's fork watchers are slow to fire because they only fire
# at the beginning of a loop; due to our use of callbacks that
# run at the end of the loop, that may be too late. The
# threadpool and resolvers depend on the fork handlers being
# run (specifically, the threadpool will fail in the forked
# child if there were any threads in it, which there will be
# if the resolver_thread was in use (the default) before the
# fork.)
#
# If the forked process wants to use the threadpool or
# resolver immediately (in a queued callback), it would hang.
#
# The below is a workaround. Fortunately, both of these
# methods are idempotent and can be called multiple times
# following a fork if they suddenly started working, or were
# already working on some platforms. Other threadpools and fork handlers
# will be called at an arbitrary time later ('soon')
if hasattr(hub.threadpool, '_on_fork'):
hub.threadpool._on_fork()
# resolver_ares also has a fork watcher that's not firing
if hasattr(hub.resolver, '_on_fork'):
hub.resolver._on_fork()
# TODO: We'd like to sleep for a non-zero amount of time to force the loop to make a
# pass around before returning to this greenlet. That will allow any
# user-provided fork watchers to run. (Two calls are necessary.) HOWEVER, if
# we do this, certain tests that heavily mix threads and forking,
# like 2.7/test_threading:test_reinit_tls_after_fork, fail. It's not immediately clear
# why.
#sleep(0.00001)
#sleep(0.00001)
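# Hedged sketch of manual use: this is only needed when neither
# gevent.os.fork nor monkey-patching of the os module is in play
# (otherwise reinit() is called automatically):
#
#     import os
#     import gevent
#
#     pid = os.fork()
#     if pid == 0:            # child process
#         gevent.reinit()     # call before touching the hub in the child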
def get_hub_class():
"""Return the type of hub to use for the current thread.
If there's no type of hub for the current thread yet, 'gevent.hub.Hub' is used.
"""
hubtype = _threadlocal.Hub
if hubtype is None:
hubtype = _threadlocal.Hub = Hub
return hubtype
def get_hub(*args, **kwargs):
"""
Return the hub for the current thread.
If a hub does not exist in the current thread, a new one is
created of the type returned by :func:`get_hub_class`.
"""
hub = _threadlocal.hub
if hub is None:
hubtype = get_hub_class()
hub = _threadlocal.hub = hubtype(*args, **kwargs)
return hub
def _get_hub():
"""Return the hub for the current thread.
Return ``None`` if no hub has been created yet.
"""
return _threadlocal.hub
def set_hub(hub):
_threadlocal.hub = hub
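# A small illustration of the accessors above: get_hub() lazily creates the
# per-thread hub, _get_hub() only reports one that already exists, and
# watchers are created from the hub's loop:
#
#     hub = get_hub()
#     assert _get_hub() is hub
#     timer = hub.loop.timer(0.01)    # a one-shot timer watcher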
def _import(path):
# pylint:disable=too-many-branches
if isinstance(path, list):
if not path:
raise ImportError('Cannot import from empty list: %r' % (path, ))
for item in path[:-1]:
try:
return _import(item)
except ImportError:
pass
return _import(path[-1])
if not isinstance(path, string_types):
return path
if '.' not in path:
raise ImportError("Cannot import %r (required format: [path/][package.]module.class)" % path)
if '/' in path:
package_path, path = path.rsplit('/', 1)
sys.path = [package_path] + sys.path
else:
package_path = None
try:
module, item = path.rsplit('.', 1)
x = __import__(module)
for attr in path.split('.')[1:]:
oldx = x
x = getattr(x, attr, _NONE)
if x is _NONE:
raise ImportError('Cannot import %r from %r' % (attr, oldx))
return x
finally:
try:
sys.path.remove(package_path)
except ValueError:
pass
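# Hedged examples of the path formats _import() accepts: a dotted
# "module.Class" string, a non-string passed through unchanged, or a list
# tried in order until one import succeeds (the first list entry below is a
# deliberately nonexistent, hypothetical name):
#
#     Resolver = _import('gevent.resolver_thread.Resolver')
#     Same = _import(Resolver)                        # already an object
#     First = _import(['not.a.real.Module',
#                      'gevent.threadpool.ThreadPool'])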
def config(default, envvar):
result = os.environ.get(envvar) or default # absolute import gets confused pylint: disable=no-member
if isinstance(result, string_types):
return result.split(',')
return result
def resolver_config(default, envvar):
result = config(default, envvar)
return [_resolvers.get(x, x) for x in result]
_resolvers = {'ares': 'gevent.resolver_ares.Resolver',
'thread': 'gevent.resolver_thread.Resolver',
'block': 'gevent.socket.BlockingResolver'}
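# Hedged example of how this environment-driven configuration is consumed:
# a comma-separated GEVENT_RESOLVER value is split by config() and shorthand
# names are expanded through the _resolvers map by resolver_config():
#
#     # With GEVENT_RESOLVER="ares,thread" in the environment this returns
#     # ['gevent.resolver_ares.Resolver', 'gevent.resolver_thread.Resolver'];
#     # with the variable unset it falls back to the default given here.
#     classes = resolver_config('thread', 'GEVENT_RESOLVER')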
_DEFAULT_LOOP_CLASS = 'gevent.core.loop'
class Hub(RawGreenlet):
"""A greenlet that runs the event loop.
It is created automatically by :func:`get_hub`.
**Switching**
Every time this greenlet (i.e., the event loop) is switched *to*, if
the current greenlet has a ``switch_out`` method, it will be called. This
allows a greenlet to take some cleanup actions before yielding control. This method
should not call any gevent blocking functions.
"""
#: If instances of these classes are raised into the event loop,
#: they will be propagated out to the main greenlet (where they will
#: usually be caught by Python itself)
SYSTEM_ERROR = (KeyboardInterrupt, SystemExit, SystemError)
#: Instances of these classes are not considered to be errors and
#: do not get logged/printed when raised by the event loop.
NOT_ERROR = (GreenletExit, SystemExit)
loop_class = config(_DEFAULT_LOOP_CLASS, 'GEVENT_LOOP')
# For the standard class, go ahead and import it when this class
# is defined. This is no loss of generality because the envvar is
# only read when this class is defined, and we know that the
# standard class will be available. This can solve problems with
# the class being imported from multiple threads at once, leading
# to one of the imports failing. Only do this for the object we
# need in the constructor, as the rest of the factories are
# themselves handled lazily. See #687. (People using a custom loop_class
# can probably manage to get_hub() from the main thread or otherwise import
# that loop_class themselves.)
if loop_class == [_DEFAULT_LOOP_CLASS]:
loop_class = [_import(loop_class)]
resolver_class = ['gevent.resolver_thread.Resolver',
'gevent.resolver_ares.Resolver',
'gevent.socket.BlockingResolver']
#: The class or callable object, or the name of a factory function or class,
#: that will be used to create :attr:`resolver`. By default, configured according to
#: :doc:`dns`. If a list, a list of objects in preference order.
resolver_class = resolver_config(resolver_class, 'GEVENT_RESOLVER')
threadpool_class = config('gevent.threadpool.ThreadPool', 'GEVENT_THREADPOOL')
backend = config(None, 'GEVENT_BACKEND')
threadpool_size = 10
# using pprint.pformat can override custom __repr__ methods on dict/list
# subclasses, which can be a security concern
format_context = 'pprint.saferepr'
def __init__(self, loop=None, default=None):
RawGreenlet.__init__(self)
if hasattr(loop, 'run'):
if default is not None:
raise TypeError("Unexpected argument: default")
self.loop = loop
elif _threadlocal.loop is not None:
# Reuse a loop instance previously set by
# destroying a hub without destroying the associated
# loop. See #237 and #238.
self.loop = _threadlocal.loop
else:
if default is None and get_ident() != MAIN_THREAD:
default = False
loop_class = _import(self.loop_class)
if loop is None:
loop = self.backend
self.loop = loop_class(flags=loop, default=default)
self._resolver = None
self._threadpool = None
self.format_context = _import(self.format_context)
def __repr__(self):
if self.loop is None:
info = 'destroyed'
else:
try:
info = self.loop._format()
except Exception as ex: # pylint:disable=broad-except
info = str(ex) or repr(ex) or 'error'
result = '<%s at 0x%x %s' % (self.__class__.__name__, id(self), info)
if self._resolver is not None:
result += ' resolver=%r' % self._resolver
if self._threadpool is not None:
result += ' threadpool=%r' % self._threadpool
return result + '>'
def handle_error(self, context, type, value, tb):
"""
Called by the event loop when an error occurs. The arguments
type, value, and tb are the standard tuple returned by :func:`sys.exc_info`.
Applications can set a property on the hub with this same signature
to override the error handling provided by this class.
Errors that are :attr:`system errors <SYSTEM_ERROR>` are passed
to :meth:`handle_system_error`.
:param context: If this is ``None``, indicates a system error that
should generally result in exiting the loop and being thrown to the
parent greenlet.
"""
if isinstance(value, str):
# Cython can raise errors where the value is a plain string
# e.g., AttributeError, "_semaphore.Semaphore has no attr", <traceback>
value = type(value)
if not issubclass(type, self.NOT_ERROR):
self.print_exception(context, type, value, tb)
if context is None or issubclass(type, self.SYSTEM_ERROR):
self.handle_system_error(type, value)
def handle_system_error(self, type, value):
current = getcurrent()
if current is self or current is self.parent or self.loop is None:
self.parent.throw(type, value)
else:
# in case system error was handled and life goes on
# switch back to this greenlet as well
cb = None
try:
cb = self.loop.run_callback(current.switch)
except: # pylint:disable=bare-except
traceback.print_exc(file=self.exception_stream)
try:
self.parent.throw(type, value)
finally:
if cb is not None:
cb.stop()
@readproperty
def exception_stream(self):
"""
The stream to which exceptions will be written.
Defaults to ``sys.stderr`` unless assigned to.
.. versionadded:: 1.2a1
"""
# Unwrap any FileObjectThread we have thrown around sys.stderr
# (because it can't be used in the hub). Tricky because we are
# called in error situations when it's not safe to import.
stderr = sys.stderr
if type(stderr).__name__ == 'FileObjectThread':
stderr = stderr.io # pylint:disable=no-member
return stderr
def print_exception(self, context, type, value, tb):
# Python 3 does not gracefully handle None value or tb in
# traceback.print_exception() as previous versions did.
# pylint:disable=no-member
errstream = self.exception_stream
if value is None:
errstream.write('%s\n' % type.__name__)
else:
traceback.print_exception(type, value, tb, file=errstream)
del tb
try:
import time
errstream.write(time.ctime())
errstream.write(' ' if context is not None else '\n')
except: # pylint:disable=bare-except
# Possible not safe to import under certain
# error conditions in Python 2
pass
if context is not None:
if not isinstance(context, str):
try:
context = self.format_context(context)
except: # pylint:disable=bare-except
traceback.print_exc(file=self.exception_stream)
context = repr(context)
errstream.write('%s failed with %s\n\n' % (context, getattr(type, '__name__', 'exception'), ))
def switch(self):
switch_out = getattr(getcurrent(), 'switch_out', None)
if switch_out is not None:
switch_out()
return RawGreenlet.switch(self)
def switch_out(self):
raise BlockingSwitchOutError('Impossible to call blocking function in the event loop callback')
def wait(self, watcher):
"""
Wait until the *watcher* (which should not be started) is ready.
The current greenlet will be unscheduled during this time.
.. seealso:: :class:`gevent.core.io`, :class:`gevent.core.timer`,
:class:`gevent.core.signal`, :class:`gevent.core.idle`, :class:`gevent.core.prepare`,
:class:`gevent.core.check`, :class:`gevent.core.fork`, :class:`gevent.core.async`,
:class:`gevent.core.child`, :class:`gevent.core.stat`
"""
waiter = Waiter()
unique = object()
watcher.start(waiter.switch, unique)
try:
result = waiter.get()
if result is not unique:
raise InvalidSwitchError('Invalid switch into %s: %r (expected %r)' % (getcurrent(), result, unique))
finally:
watcher.stop()
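# Hedged sketch of wait() with a one-shot timer watcher; this mirrors what
# the module-level sleep() does for positive delays:
#
#     hub = get_hub()
#     hub.wait(hub.loop.timer(0.25))   # unschedules the caller for ~0.25s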
def cancel_wait(self, watcher, error):
"""
Cancel an in-progress call to :meth:`wait` by throwing the given *error*
in the waiting greenlet.
"""
if watcher.callback is not None:
self.loop.run_callback(self._cancel_wait, watcher, error)
def _cancel_wait(self, watcher, error):
if watcher.active:
switch = watcher.callback
if switch is not None:
greenlet = getattr(switch, '__self__', None)
if greenlet is not None:
greenlet.throw(error)
def run(self):
"""
Entry-point to running the loop. This method is called automatically
when the hub greenlet is scheduled; do not call it directly.
:raises LoopExit: If the loop finishes running. This means
that there are no other scheduled greenlets, and no active
watchers or servers. In some situations, this indicates a
programming error.
"""
assert self is getcurrent(), 'Do not call Hub.run() directly'
while True:
loop = self.loop
loop.error_handler = self
try:
loop.run()
finally:
loop.error_handler = None # break the refcount cycle
self.parent.throw(LoopExit('This operation would block forever', self))
# this function must never return, as it will cause switch() in the parent greenlet
# to return an unexpected value
# It is still possible to kill this greenlet with throw. However, in that case
# switching to it is no longer safe, as switch will return immediately
def join(self, timeout=None):
"""Wait for the event loop to finish. Exits only when there are
no more spawned greenlets, started servers, active timeouts or watchers.
If *timeout* is provided, wait no longer for the specified number of seconds.
Returns True if exited because the loop finished execution.
Returns False if exited because of timeout expired.
"""
assert getcurrent() is self.parent, "only possible from the MAIN greenlet"
if self.dead:
return True
waiter = Waiter()
if timeout is not None:
timeout = self.loop.timer(timeout, ref=False)
timeout.start(waiter.switch)
try:
try:
waiter.get()
except LoopExit:
return True
finally:
if timeout is not None:
timeout.stop()
return False
def destroy(self, destroy_loop=None):
if self._resolver is not None:
self._resolver.close()
del self._resolver
if self._threadpool is not None:
self._threadpool.kill()
del self._threadpool
if destroy_loop is None:
destroy_loop = not self.loop.default
if destroy_loop:
if _threadlocal.loop is self.loop:
# Don't let anyone try to reuse this
_threadlocal.loop = None
self.loop.destroy()
else:
# Store in case another hub is created for this
# thread.
_threadlocal.loop = self.loop
self.loop = None
if _threadlocal.hub is self:
_threadlocal.hub = None
def _get_resolver(self):
if self._resolver is None:
if self.resolver_class is not None:
self.resolver_class = _import(self.resolver_class)
self._resolver = self.resolver_class(hub=self)
return self._resolver
def _set_resolver(self, value):
self._resolver = value
def _del_resolver(self):
del self._resolver
resolver = property(_get_resolver, _set_resolver, _del_resolver)
def _get_threadpool(self):
if self._threadpool is None:
if self.threadpool_class is not None:
self.threadpool_class = _import(self.threadpool_class)
self._threadpool = self.threadpool_class(self.threadpool_size, hub=self)
return self._threadpool
def _set_threadpool(self, value):
self._threadpool = value
def _del_threadpool(self):
del self._threadpool
threadpool = property(_get_threadpool, _set_threadpool, _del_threadpool)
class Waiter(object):
"""
A low level communication utility for greenlets.
Waiter is a wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them somewhat safer:
* switching will occur only if the waiting greenlet is currently executing the :meth:`get` method;
* any error raised in the greenlet is handled inside :meth:`switch` and :meth:`throw`
* if :meth:`switch`/:meth:`throw` is called before the receiver calls :meth:`get`, then :class:`Waiter`
will store the value/exception. The following :meth:`get` will return the value/raise the exception.
The :meth:`switch` and :meth:`throw` methods must only be called from the :class:`Hub` greenlet.
The :meth:`get` method must be called from a greenlet other than :class:`Hub`.
>>> result = Waiter()
>>> timer = get_hub().loop.timer(0.1)
>>> timer.start(result.switch, 'hello from Waiter')
>>> result.get() # blocks for 0.1 seconds
'hello from Waiter'
If switch is called before the greenlet gets a chance to call :meth:`get` then
:class:`Waiter` stores the value.
>>> result = Waiter()
>>> timer = get_hub().loop.timer(0.1)
>>> timer.start(result.switch, 'hi from Waiter')
>>> sleep(0.2)
>>> result.get() # returns immediately without blocking
'hi from Waiter'
.. warning::
This a limited and dangerous way to communicate between
greenlets. It can easily leave a greenlet unscheduled forever
if used incorrectly. Consider using safer classes such as
:class:`gevent.event.Event`, :class:`gevent.event.AsyncResult`,
or :class:`gevent.queue.Queue`.
"""
__slots__ = ['hub', 'greenlet', 'value', '_exception']
def __init__(self, hub=None):
if hub is None:
self.hub = get_hub()
else:
self.hub = hub
self.greenlet = None
self.value = None
self._exception = _NONE
def clear(self):
self.greenlet = None
self.value = None
self._exception = _NONE
def __str__(self):
if self._exception is _NONE:
return '<%s greenlet=%s>' % (type(self).__name__, self.greenlet)
elif self._exception is None:
return '<%s greenlet=%s value=%r>' % (type(self).__name__, self.greenlet, self.value)
else:
return '<%s greenlet=%s exc_info=%r>' % (type(self).__name__, self.greenlet, self.exc_info)
def ready(self):
"""Return true if and only if it holds a value or an exception"""
return self._exception is not _NONE
def successful(self):
"""Return true if and only if it is ready and holds a value"""
return self._exception is None
@property
def exc_info(self):
"Holds the exception info passed to :meth:`throw` if :meth:`throw` was called. Otherwise ``None``."
if self._exception is not _NONE:
return self._exception
def switch(self, value=None):
"""Switch to the greenlet if one's available. Otherwise store the value."""
greenlet = self.greenlet
if greenlet is None:
self.value = value
self._exception = None
else:
assert getcurrent() is self.hub, "Can only use Waiter.switch method from the Hub greenlet"
switch = greenlet.switch
try:
switch(value)
except: # pylint:disable=bare-except
self.hub.handle_error(switch, *sys.exc_info())
def switch_args(self, *args):
return self.switch(args)
def throw(self, *throw_args):
"""Switch to the greenlet with the exception. If there's no greenlet, store the exception."""
greenlet = self.greenlet
if greenlet is None:
self._exception = throw_args
else:
assert getcurrent() is self.hub, "Can only use Waiter.throw method from the Hub greenlet"
throw = greenlet.throw
try:
throw(*throw_args)
except: # pylint:disable=bare-except
self.hub.handle_error(throw, *sys.exc_info())
def get(self):
"""If a value/an exception is stored, return/raise it. Otherwise until switch() or throw() is called."""
if self._exception is not _NONE:
if self._exception is None:
return self.value
else:
getcurrent().throw(*self._exception)
else:
if self.greenlet is not None:
raise ConcurrentObjectUseError('This Waiter is already used by %r' % (self.greenlet, ))
self.greenlet = getcurrent()
try:
return self.hub.switch()
finally:
self.greenlet = None
def __call__(self, source):
if source.exception is None:
self.switch(source.value)
else:
self.throw(source.exception)
# can also have a debugging version, that wraps the value in a tuple (self, value) in switch()
# and unwraps it in wait() thus checking that switch() was indeed called
class _MultipleWaiter(Waiter):
"""
An internal extension of Waiter that can be used if multiple objects
must be waited on, and there is a chance that in between waits greenlets
might be switched out. All greenlets that switch to this waiter
will have their value returned.
This does not handle exceptions or throw methods.
"""
__slots__ = ['_values']
def __init__(self, *args, **kwargs):
Waiter.__init__(self, *args, **kwargs)
# we typically expect a relatively small number of these to be outstanding.
# since we pop from the left, a deque might be slightly
# more efficient, but since we're in the hub we avoid imports if
# we can help it to better support monkey-patching, and delaying the import
# here can be impractical (see https://github.com/gevent/gevent/issues/652)
self._values = list()
def switch(self, value): # pylint:disable=signature-differs
self._values.append(value)
Waiter.switch(self, True)
def get(self):
if not self._values:
Waiter.get(self)
Waiter.clear(self)
return self._values.pop(0)
def iwait(objects, timeout=None, count=None):
"""
Iteratively yield *objects* as they are ready, until all (or *count*) are ready
or *timeout* expired.
:param objects: A sequence (supporting :func:`len`) containing objects
implementing the wait protocol (rawlink() and unlink()).
:keyword int count: If not `None`, then a number specifying the maximum number
of objects to wait for. If ``None`` (the default), all objects
are waited for.
:keyword float timeout: If given, specifies a maximum number of seconds
to wait. If the timeout expires before the desired waited-for objects
are available, then this method returns immediately.
.. seealso:: :func:`wait`
.. versionchanged:: 1.1a1
Add the *count* parameter.
.. versionchanged:: 1.1a2
No longer raise :exc:`LoopExit` if our caller switches greenlets
in between items yielded by this function.
"""
# QQQ would be nice to support iterable here that can be generated slowly (why?)
if objects is None:
yield get_hub().join(timeout=timeout)
return
count = len(objects) if count is None else min(count, len(objects))
waiter = _MultipleWaiter()
switch = waiter.switch
if timeout is not None:
timer = get_hub().loop.timer(timeout, priority=-1)
timer.start(switch, _NONE)
try:
for obj in objects:
obj.rawlink(switch)
for _ in xrange(count):
item = waiter.get()
waiter.clear()
if item is _NONE:
return
yield item
finally:
if timeout is not None:
timer.stop()
for aobj in objects:
unlink = getattr(aobj, 'unlink', None)
if unlink:
try:
unlink(switch)
except: # pylint:disable=bare-except
traceback.print_exc()
def wait(objects=None, timeout=None, count=None):
"""
Wait for ``objects`` to become ready or for event loop to finish.
If ``objects`` is provided, it must be a list containing objects
implementing the wait protocol (rawlink() and unlink() methods):
- :class:`gevent.Greenlet` instance
- :class:`gevent.event.Event` instance
- :class:`gevent.lock.Semaphore` instance
- :class:`gevent.subprocess.Popen` instance
If ``objects`` is ``None`` (the default), ``wait()`` blocks until
the current event loop has nothing to do (or until ``timeout`` passes):
- all greenlets have finished
- all servers were stopped
- all event loop watchers were stopped.
If ``count`` is ``None`` (the default), wait for all ``objects``
to become ready.
If ``count`` is a number, wait for (up to) ``count`` objects to become
ready. (For example, if count is ``1`` then the function exits
when any object in the list is ready).
If ``timeout`` is provided, it specifies the maximum number of
seconds ``wait()`` will block.
Returns the list of ready objects, in the order in which they were
ready.
.. seealso:: :func:`iwait`
"""
if objects is None:
return get_hub().join(timeout=timeout)
return list(iwait(objects, timeout, count))
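# Hedged usage sketch for wait()/iwait() with objects that support the
# rawlink()/unlink() protocol:
#
#     import gevent
#     from gevent.event import Event
#
#     events = [Event() for _ in range(3)]
#     gevent.spawn_later(0.1, events[1].set)
#     gevent.spawn_later(0.2, events[0].set)
#     ready = gevent.wait(events, count=2, timeout=1)   # first two to fire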
class linkproxy(object):
__slots__ = ['callback', 'obj']
def __init__(self, callback, obj):
self.callback = callback
self.obj = obj
def __call__(self, *args):
callback = self.callback
obj = self.obj
self.callback = None
self.obj = None
callback(obj)
|
|
#!/usr/bin/python2.7
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two versions of Blockly's core files:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
#
# This script also generates:
# blocks_compressed.js: The compressed common blocks.
# blocks_horizontal_compressed.js: The compressed Scratch horizontal blocks.
# blocks_vertical_compressed.js: The compressed Scratch vertical blocks.
# msg/js/<LANG>.js for every language <LANG> defined in msg/js/<LANG>.json.
import sys
if sys.version_info[0] != 2:
raise Exception("Blockly build only compatible with Python 2.x.\n"
"You are using: " + sys.version)
import errno, glob, httplib, json, os, re, subprocess, threading, urllib
def import_path(fullpath):
"""Import a file with full path specification.
Allows one to import from any directory, something __import__ does not do.
Args:
fullpath: Path and filename of import.
Returns:
An imported module.
"""
path, filename = os.path.split(fullpath)
filename, ext = os.path.splitext(filename)
sys.path.append(path)
module = __import__(filename)
reload(module) # Might be out of date.
del sys.path[-1]
return module
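# Hedged example (the path and attribute below are hypothetical): load a
# helper module by its file path instead of requiring it to be on sys.path.
#
#     helper = import_path(os.path.join("..", "tools", "helper.py"))
#     helper.do_something()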
HEADER = ("// Do not edit this file; automatically generated by build.py.\n"
"'use strict';\n")
class Gen_uncompressed(threading.Thread):
"""Generate a JavaScript file that loads Blockly's raw files.
Runs in a separate thread.
"""
def __init__(self, search_paths, vertical):
threading.Thread.__init__(self)
self.search_paths = search_paths
self.vertical = vertical
def run(self):
if self.vertical:
target_filename = 'blockly_uncompressed_vertical.js'
else:
target_filename = 'blockly_uncompressed_horizontal.js'
f = open(target_filename, 'w')
f.write(HEADER)
f.write("""
var isNodeJS = !!(typeof module !== 'undefined' && module.exports &&
typeof window === 'undefined');
if (isNodeJS) {
var window = {};
require('closure-library');
}
window.BLOCKLY_DIR = (function() {
if (!isNodeJS) {
// Find name of current directory.
var scripts = document.getElementsByTagName('script');
var re = new RegExp('(.+)[\/]blockly_uncompressed(_vertical|_horizontal|)\.js$');
for (var i = 0, script; script = scripts[i]; i++) {
var match = re.exec(script.src);
if (match) {
return match[1];
}
}
alert('Could not detect Blockly\\'s directory name.');
}
return '';
})();
window.BLOCKLY_BOOT = function() {
var dir = '';
if (isNodeJS) {
require('closure-library');
dir = 'blockly';
} else {
// Execute after Closure has loaded.
if (!window.goog) {
alert('Error: Closure not found. Read this:\\n' +
'developers.google.com/blockly/guides/modify/web/closure');
}
dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
}
""")
add_dependency = []
base_path = calcdeps.FindClosureBasePath(self.search_paths)
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
add_dependency.sort() # Deterministic build.
add_dependency = '\n'.join(add_dependency)
# Find the Blockly directory name and replace it with a JS variable.
# This allows blockly_uncompressed.js to be compiled on one computer and be
# used on another, even if the directory name differs.
m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]core[\\/])', '\\1" + dir + "\\2', add_dependency)
f.write(add_dependency + '\n')
provides = []
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
if not dep.filename.startswith(os.pardir + os.sep): # '../'
provides.extend(dep.provides)
provides.sort() # Deterministic build.
f.write('\n')
f.write('// Load Blockly.\n')
for provide in provides:
f.write("goog.require('%s');\n" % provide)
f.write("""
delete this.BLOCKLY_DIR;
delete this.BLOCKLY_BOOT;
};
if (isNodeJS) {
window.BLOCKLY_BOOT();
module.exports = Blockly;
} else {
// Delete any existing Closure (e.g. Soy's nogoog_shim).
document.write('<script>var goog = undefined;</script>');
// Load fresh Closure Library.
document.write('<script src="' + window.BLOCKLY_DIR +
'/../closure-library/closure/goog/base.js"></script>');
document.write('<script>window.BLOCKLY_BOOT();</script>');
}
""")
f.close()
print("SUCCESS: " + target_filename)
class Gen_compressed(threading.Thread):
"""Generate a JavaScript file that contains all of Blockly's core and all
required parts of Closure, compiled together.
Uses the Closure Compiler's online API.
Runs in a separate thread.
"""
def __init__(self, search_paths_vertical, search_paths_horizontal):
threading.Thread.__init__(self)
self.search_paths_vertical = search_paths_vertical
self.search_paths_horizontal = search_paths_horizontal
def run(self):
self.gen_core(True)
self.gen_core(False)
self.gen_blocks("horizontal")
self.gen_blocks("vertical")
self.gen_blocks("common")
self.gen_generator("javascript")
self.gen_generator("python")
self.gen_generator("php")
self.gen_generator("dart")
self.gen_generator("lua")
def gen_core(self, vertical):
if vertical:
target_filename = 'blockly_compressed_vertical.js'
search_paths = self.search_paths_vertical
else:
target_filename = 'blockly_compressed_horizontal.js'
search_paths = self.search_paths_horizontal
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("use_closure_library", "true"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
filenames = calcdeps.CalculateDependencies(search_paths,
[os.path.join("core", "blockly.js")])
filenames.sort() # Deterministic build.
for filename in filenames:
# Filter out the Closure files (the compiler will add them).
if filename.startswith(os.pardir + os.sep): # '../'
continue
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
self.do_compile(params, target_filename, filenames, "")
def gen_blocks(self, block_type):
if block_type == "horizontal":
target_filename = "blocks_compressed_horizontal.js"
filenames = glob.glob(os.path.join("blocks_horizontal", "*.js"))
elif block_type == "vertical":
target_filename = "blocks_compressed_vertical.js"
filenames = glob.glob(os.path.join("blocks_vertical", "*.js"))
elif block_type == "common":
target_filename = "blocks_compressed.js"
filenames = glob.glob(os.path.join("blocks_common", "*.js"))
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
# Add Blockly.Blocks to be compatible with the compiler.
params.append(("js_code", "goog.provide('Blockly.Blocks');"))
# Add Blockly.Colours for use of centralized colour bank
filenames.append(os.path.join("core", "colours.js"))
filenames.append(os.path.join("core", "constants.js"))
for filename in filenames:
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
# Remove Blockly.Blocks to be compatible with Blockly.
remove = "var Blockly={Blocks:{}};"
self.do_compile(params, target_filename, filenames, remove)
def gen_generator(self, language):
target_filename = language + "_compressed.js"
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
# Add Blockly.Generator to be compatible with the compiler.
params.append(("js_code", "goog.provide('Blockly.Generator');"))
filenames = glob.glob(
os.path.join("generators", language, "*.js"))
filenames.sort() # Deterministic build.
filenames.insert(0, os.path.join("generators", language + ".js"))
for filename in filenames:
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
filenames.insert(0, "[goog.provide]")
# Remove Blockly.Generator to be compatible with Blockly.
remove = "var Blockly={Generator:{}};"
self.do_compile(params, target_filename, filenames, remove)
def do_compile(self, params, target_filename, filenames, remove):
# Send the request to Google.
headers = {"Content-type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPSConnection("closure-compiler.appspot.com")
conn.request("POST", "/compile", urllib.urlencode(params), headers)
response = conn.getresponse()
json_str = response.read()
conn.close()
# Parse the JSON response.
json_data = json.loads(json_str)
def file_lookup(name):
if not name.startswith("Input_"):
return "???"
n = int(name[6:]) - 1
return filenames[n]
if json_data.has_key("serverErrors"):
errors = json_data["serverErrors"]
for error in errors:
print("SERVER ERROR: %s" % target_filename)
print(error["error"])
elif json_data.has_key("errors"):
errors = json_data["errors"]
for error in errors:
print("FATAL ERROR")
print(error["error"])
if error["file"]:
print("%s at line %d:" % (
file_lookup(error["file"]), error["lineno"]))
print(error["line"])
print((" " * error["charno"]) + "^")
sys.exit(1)
else:
if json_data.has_key("warnings"):
warnings = json_data["warnings"]
for warning in warnings:
print("WARNING")
print(warning["warning"])
if warning["file"]:
print("%s at line %d:" % (
file_lookup(warning["file"]), warning["lineno"]))
print(warning["line"])
print((" " * warning["charno"]) + "^")
print()
if not json_data.has_key("compiledCode"):
print("FATAL ERROR: Compiler did not return compiledCode.")
sys.exit(1)
code = HEADER + "\n" + json_data["compiledCode"]
code = code.replace(remove, "")
# Trim down Google's (and only Google's) Apache licences.
# The Closure Compiler preserves these.
LICENSE = re.compile("""/\\*
[\w ]+
Copyright \\d+ Google Inc.
https://developers.google.com/blockly/
Licensed under the Apache License, Version 2.0 \(the "License"\);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
\\*/""")
code = re.sub(LICENSE, "", code)
stats = json_data["statistics"]
original_b = stats["originalSize"]
compressed_b = stats["compressedSize"]
if original_b > 0 and compressed_b > 0:
f = open(target_filename, "w")
f.write(code)
f.close()
original_kb = int(original_b / 1024 + 0.5)
compressed_kb = int(compressed_b / 1024 + 0.5)
ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
print("SUCCESS: " + target_filename)
print("Size changed from %d KB to %d KB (%d%%)." % (
original_kb, compressed_kb, ratio))
else:
print("UNKNOWN ERROR")
class Gen_langfiles(threading.Thread):
"""Generate JavaScript file for each natural language supported.
Runs in a separate thread.
"""
def __init__(self):
threading.Thread.__init__(self)
def _rebuild(self, srcs, dests):
# Determine whether any of the files in srcs is newer than any in dests.
try:
return (max(os.path.getmtime(src) for src in srcs) >
min(os.path.getmtime(dest) for dest in dests))
except OSError as e:
# Was a file not found?
if e.errno == errno.ENOENT:
# If it was a source file, we can't proceed.
if e.filename in srcs:
print("Source file missing: " + e.filename)
sys.exit(1)
else:
# If a destination file was missing, rebuild.
return True
else:
print("Error checking file creation times: " + e)
def run(self):
# The files msg/json/{en,qqq,synonyms}.json depend on msg/messages.js.
if self._rebuild([os.path.join("msg", "messages.js")],
[os.path.join("msg", "json", f) for f in
["en.json", "qqq.json", "synonyms.json"]]):
try:
subprocess.check_call([
"python",
os.path.join("i18n", "js_to_json.py"),
"--input_file", "msg/messages.js",
"--output_dir", "msg/json/",
"--quiet"])
except (subprocess.CalledProcessError, OSError) as e:
# Documentation for subprocess.check_call says that CalledProcessError
# will be raised on failure, but I found that OSError is also possible.
print("Error running i18n/js_to_json.py: ", e)
sys.exit(1)
# Checking whether it is necessary to rebuild the js files would be a lot of
# work since we would have to compare each <lang>.json file with each
# <lang>.js file. Rebuilding is easy and cheap, so just go ahead and do it.
try:
# Use create_messages.py to create .js files from .json files.
cmd = [
"python",
os.path.join("i18n", "create_messages.py"),
"--source_lang_file", os.path.join("msg", "json", "en.json"),
"--source_synonym_file", os.path.join("msg", "json", "synonyms.json"),
"--source_constants_file", os.path.join("msg", "json", "constants.json"),
"--key_file", os.path.join("msg", "json", "keys.json"),
"--output_dir", os.path.join("msg", "js"),
"--quiet"]
json_files = glob.glob(os.path.join("msg", "json", "*.json"))
json_files = [file for file in json_files if not
(file.endswith(("keys.json", "synonyms.json", "qqq.json", "constants.json")))]
cmd.extend(json_files)
subprocess.check_call(cmd)
except (subprocess.CalledProcessError, OSError) as e:
print("Error running i18n/create_messages.py: ", e)
sys.exit(1)
# Output list of .js files created.
for f in json_files:
# This assumes the path to the current directory does not contain "json".
f = f.replace("json", "js")
if os.path.isfile(f):
print("SUCCESS: " + f)
else:
print("FAILED to create " + f)
def exclude_vertical(item):
return not item.endswith("block_render_svg_vertical.js")
def exclude_horizontal(item):
return not item.endswith("block_render_svg_horizontal.js")
if __name__ == "__main__":
try:
calcdeps = import_path(os.path.join(
os.path.pardir, "closure-library", "closure", "bin", "calcdeps.py"))
except ImportError:
if os.path.isdir(os.path.join(os.path.pardir, "closure-library-read-only")):
# Dir got renamed when Closure moved from Google Code to GitHub in 2014.
print("Error: Closure directory needs to be renamed from"
"'closure-library-read-only' to 'closure-library'.\n"
"Please rename this directory.")
elif os.path.isdir(os.path.join(os.path.pardir, "google-closure-library")):
# When Closure is installed by npm, it is named "google-closure-library".
#calcdeps = import_path(os.path.join(
# os.path.pardir, "google-closure-library", "closure", "bin", "calcdeps.py"))
print("Error: Closure directory needs to be renamed from"
"'google-closure-library' to 'closure-library'.\n"
"Please rename this directory.")
else:
print("""Error: Closure not found. Read this:
developers.google.com/blockly/guides/modify/web/closure""")
sys.exit(1)
search_paths = calcdeps.ExpandDirectories(
["core", os.path.join(os.path.pardir, "closure-library")])
search_paths_horizontal = filter(exclude_vertical, search_paths)
search_paths_vertical = filter(exclude_horizontal, search_paths)
# Run all tasks in parallel threads.
# Uncompressed is limited by processor speed.
# Compressed is limited by network and server speed.
# Vertical:
Gen_uncompressed(search_paths_vertical, True).start()
# Horizontal:
Gen_uncompressed(search_paths_horizontal, False).start()
# Compressed forms of vertical and horizontal.
Gen_compressed(search_paths_vertical, search_paths_horizontal).start()
# This is run locally in a separate thread.
Gen_langfiles().start()
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Base types for nodes in a GRIT resource tree.
'''
from __future__ import print_function
import ast
import os
import struct
import sys
from xml.sax import saxutils
import six
from grit import constants
from grit import clique
from grit import exception
from grit import util
from grit.node import brotli_util
import grit.format.gzip_string
class Node(object):
'''An item in the tree that has children.'''
# Valid content types that can be returned by _ContentType()
_CONTENT_TYPE_NONE = 0 # No CDATA content but may have children
_CONTENT_TYPE_CDATA = 1 # Only CDATA, no children.
_CONTENT_TYPE_MIXED = 2 # CDATA and children, possibly intermingled
# Types of files to be compressed by default.
_COMPRESS_BY_DEFAULT_EXTENSIONS = ('.js', '.html', '.css', '.svg')
# Types of files to disallow compressing, as it provides no benefit, and can
# potentially even make the file larger.
_COMPRESS_DISALLOWED_EXTENSIONS = ('.png', '.jpg')
# By default, nodes are not marked as skipped by the allowlist.
_allowlist_marked_as_skip = False
# A class-static cache to speed up EvaluateExpression().
# Keys are expressions (e.g. 'is_ios and lang == "fr"'). Values are tuples
# (code, variables_in_expr) where code is the compiled expression and can be
# directly eval'd, and variables_in_expr is the list of variable and method
# names used in the expression (e.g. ['is_ios', 'lang']).
eval_expr_cache = {}
def __init__(self):
self.children = [] # A list of child elements
self.mixed_content = [] # A list of u'' and/or child elements (this
# duplicates 'children' but
# is needed to preserve markup-type content).
self.name = u'' # The name of this element
self.attrs = {} # The set of attributes (keys to values)
self.parent = None # Our parent unless we are the root element.
self.uberclique = None # Allows overriding uberclique for parts of tree
self.source = None # File that this node was parsed from
# This context handler allows you to write "with node:" and get a
# line identifying the offending node if an exception escapes from the body
# of the with statement.
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
print(u'Error processing node %s: %s' % (six.text_type(self), exc_value))
def __iter__(self):
'''A preorder iteration through the tree that this node is the root of.'''
return self.Preorder()
def Preorder(self):
'''Generator that generates first this node, then the same generator for
any child nodes.'''
yield self
for child in self.children:
for iterchild in child.Preorder():
yield iterchild
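# Hedged illustration of the traversal order (this node first, then each
# child's subtree, depth-first); the element names are hypothetical:
#
#     # A tree <a><b><d/></b><c/></a> visits a, b, d, c:
#     for node in root:        # __iter__ delegates to Preorder()
#         print(node.name)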
def ActiveChildren(self):
'''Returns the children of this node that should be included in the current
configuration. Overridden by <if>.'''
return [node for node in self.children if not node.AllowlistMarkedAsSkip()]
def ActiveDescendants(self):
'''Yields the current node and all descendants that should be included in
the current configuration, in preorder.'''
yield self
for child in self.ActiveChildren():
for descendant in child.ActiveDescendants():
yield descendant
def GetRoot(self):
'''Returns the root Node in the tree this Node belongs to.'''
curr = self
while curr.parent:
curr = curr.parent
return curr
# TODO(joi) Use this (currently untested) optimization?:
#if hasattr(self, '_root'):
# return self._root
#curr = self
#while curr.parent and not hasattr(curr, '_root'):
# curr = curr.parent
#if curr.parent:
# self._root = curr._root
#else:
# self._root = curr
#return self._root
def StartParsing(self, name, parent):
'''Called at the start of parsing.
Args:
name: u'elementname'
parent: grit.node.base.Node or subclass or None
'''
assert isinstance(name, six.string_types)
assert not parent or isinstance(parent, Node)
self.name = name
self.parent = parent
def AddChild(self, child):
'''Adds a child to the list of children of this node, if it is a valid
child for the node.'''
assert isinstance(child, Node)
if (not self._IsValidChild(child) or
self._ContentType() == self._CONTENT_TYPE_CDATA):
explanation = 'invalid child %s for parent %s' % (str(child), self.name)
raise exception.UnexpectedChild(explanation)
self.children.append(child)
self.mixed_content.append(child)
def RemoveChild(self, child_id):
'''Removes the first node that has a "name" attribute which
matches "child_id" in the list of immediate children of
this node.
Args:
child_id: String identifying the child to be removed
'''
index = 0
# Safe not to copy since we only remove the first element found
for child in self.children:
name_attr = child.attrs['name']
if name_attr == child_id:
self.children.pop(index)
self.mixed_content.pop(index)
break
index += 1
def AppendContent(self, content):
'''Appends a chunk of text as content of this node.
Args:
content: u'hello'
Return:
None
'''
assert isinstance(content, six.string_types)
if self._ContentType() != self._CONTENT_TYPE_NONE:
self.mixed_content.append(content)
elif content.strip() != '':
raise exception.UnexpectedContent()
def HandleAttribute(self, attrib, value):
'''Informs the node of an attribute that was parsed out of the GRD file
for it.
Args:
attrib: 'name'
value: 'fooblat'
Return:
None
'''
assert isinstance(attrib, six.string_types)
assert isinstance(value, six.string_types)
if self._IsValidAttribute(attrib, value):
self.attrs[attrib] = value
else:
raise exception.UnexpectedAttribute(attrib)
def EndParsing(self):
'''Called at the end of parsing.'''
# TODO(joi) Rewrite this, it's extremely ugly!
if len(self.mixed_content):
if isinstance(self.mixed_content[0], six.string_types):
# Remove leading and trailing chunks of pure whitespace.
while (len(self.mixed_content) and
isinstance(self.mixed_content[0], six.string_types) and
self.mixed_content[0].strip() == ''):
self.mixed_content = self.mixed_content[1:]
# Strip leading and trailing whitespace from mixed content chunks
# at front and back.
if (len(self.mixed_content) and
isinstance(self.mixed_content[0], six.string_types)):
self.mixed_content[0] = self.mixed_content[0].lstrip()
# Remove leading and trailing ''' (used to demarcate whitespace)
if (len(self.mixed_content) and
isinstance(self.mixed_content[0], six.string_types)):
if self.mixed_content[0].startswith("'''"):
self.mixed_content[0] = self.mixed_content[0][3:]
if len(self.mixed_content):
if isinstance(self.mixed_content[-1], six.string_types):
# Same stuff all over again for the tail end.
while (len(self.mixed_content) and
isinstance(self.mixed_content[-1], six.string_types) and
self.mixed_content[-1].strip() == ''):
self.mixed_content = self.mixed_content[:-1]
if (len(self.mixed_content) and
isinstance(self.mixed_content[-1], six.string_types)):
self.mixed_content[-1] = self.mixed_content[-1].rstrip()
if (len(self.mixed_content) and
isinstance(self.mixed_content[-1], six.string_types)):
if self.mixed_content[-1].endswith("'''"):
self.mixed_content[-1] = self.mixed_content[-1][:-3]
# Check that all mandatory attributes are there.
for node_mandatt in self.MandatoryAttributes():
mandatt_list = []
if node_mandatt.find('|') >= 0:
mandatt_list = node_mandatt.split('|')
else:
mandatt_list.append(node_mandatt)
mandatt_option_found = False
for mandatt in mandatt_list:
assert mandatt not in self.DefaultAttributes()
if mandatt in self.attrs:
if not mandatt_option_found:
mandatt_option_found = True
else:
raise exception.MutuallyExclusiveMandatoryAttribute(mandatt)
if not mandatt_option_found:
raise exception.MissingMandatoryAttribute(mandatt)
# Add default attributes if not specified in input file.
for defattr in self.DefaultAttributes():
if not defattr in self.attrs:
self.attrs[defattr] = self.DefaultAttributes()[defattr]
def GetCdata(self):
'''Returns all CDATA of this element, concatenated into a single
string. Note that this ignores any elements embedded in CDATA.'''
return ''.join([c for c in self.mixed_content
if isinstance(c, six.string_types)])
def __str__(self):
'''Returns this node and all nodes below it as an XML document in a Unicode
string.'''
header = u'<?xml version="1.0" encoding="UTF-8"?>\n'
return header + self.FormatXml()
# Some Python 2 glue.
__unicode__ = __str__
def FormatXml(self, indent = u'', one_line = False):
'''Returns this node and all nodes below it as an XML
element in a Unicode string. This differs from __unicode__ in that it does
not include the <?xml> stuff at the top of the string. If one_line is true,
children and CDATA are laid out in a way that preserves internal
whitespace.
'''
assert isinstance(indent, six.string_types)
content_one_line = (one_line or
self._ContentType() == self._CONTENT_TYPE_MIXED)
inside_content = self.ContentsAsXml(indent, content_one_line)
# Then the attributes for this node.
attribs = u''
default_attribs = self.DefaultAttributes()
for attrib, value in sorted(self.attrs.items()):
# Only print an attribute if it is other than the default value.
if attrib not in default_attribs or value != default_attribs[attrib]:
attribs += u' %s=%s' % (attrib, saxutils.quoteattr(value))
# Finally build the XML for our node and return it
if len(inside_content) > 0:
if one_line:
return u'<%s%s>%s</%s>' % (self.name, attribs, inside_content,
self.name)
elif content_one_line:
return u'%s<%s%s>\n%s %s\n%s</%s>' % (
indent, self.name, attribs,
indent, inside_content,
indent, self.name)
else:
return u'%s<%s%s>\n%s\n%s</%s>' % (
indent, self.name, attribs,
inside_content,
indent, self.name)
else:
return u'%s<%s%s />' % (indent, self.name, attribs)
def ContentsAsXml(self, indent, one_line):
'''Returns the contents of this node (CDATA and child elements) in XML
format. If 'one_line' is true, the content will be laid out on one line.'''
assert isinstance(indent, six.string_types)
# Build the contents of the element.
inside_parts = []
last_item = None
for mixed_item in self.mixed_content:
if isinstance(mixed_item, Node):
inside_parts.append(mixed_item.FormatXml(indent + u' ', one_line))
if not one_line:
inside_parts.append(u'\n')
else:
message = mixed_item
# If this is the first item and it starts with whitespace, we add
# the ''' delimiter.
if not last_item and message.lstrip() != message:
message = u"'''" + message
inside_parts.append(util.EncodeCdata(message))
last_item = mixed_item
# If there are only child nodes and no cdata, there will be a spurious
# trailing \n
if len(inside_parts) and inside_parts[-1] == '\n':
inside_parts = inside_parts[:-1]
# If the last item is a string (not a node) and ends with whitespace,
# we need to add the ''' delimiter.
if (isinstance(last_item, six.string_types) and
last_item.rstrip() != last_item):
inside_parts[-1] = inside_parts[-1] + u"'''"
return u''.join(inside_parts)
def SubstituteMessages(self, substituter):
'''Applies substitutions to all messages in the tree.
Called as a final step of RunGatherers.
Args:
substituter: a grit.util.Substituter object.
'''
for child in self.children:
child.SubstituteMessages(substituter)
def _IsValidChild(self, child):
'''Returns true if 'child' is a valid child of this node.
Overridden by subclasses.'''
return False
def _IsValidAttribute(self, name, value):
'''Returns true if 'name' is the name of a valid attribute of this element
and 'value' is a valid value for that attribute. Overridden by
subclasses unless they have only mandatory attributes.'''
return (name in self.MandatoryAttributes() or
name in self.DefaultAttributes())
def _ContentType(self):
'''Returns the type of content this element can have. Overridden by
subclasses. The content type can be one of the _CONTENT_TYPE_XXX constants
above.'''
return self._CONTENT_TYPE_NONE
def MandatoryAttributes(self):
'''Returns a list of attribute names that are mandatory (non-optional)
on the current element. One can specify a list of
"mutually exclusive mandatory" attributes by specifying them as one
element in the list, separated by a "|" character.
'''
return []
def DefaultAttributes(self):
'''Returns a dictionary of attribute names that have defaults, mapped to
the default value. Overridden by subclasses.'''
return {}
def GetCliques(self):
'''Returns all MessageClique objects belonging to this node. Overridden
by subclasses.
Return:
[clique1, clique2] or []
'''
return []
def ToRealPath(self, path_from_basedir):
'''Returns a real path (which can be absolute or relative to the current
working directory), given a path that is relative to the base directory
set for the GRIT input file.
Args:
path_from_basedir: '..'
Return:
'resource'
'''
path_from_basedir = util.PathSearcher.LocatePath(
os.path.expandvars(path_from_basedir),
self.GetRoot().GetBaseDir())
return util.normpath(os.path.join(self.GetRoot().GetBaseDir(),
path_from_basedir ))
def GetInputPath(self):
'''Returns a path, relative to the base directory set for the grd file,
that points to the file the node refers to.
'''
# This implementation works for most nodes that have an input file.
return self.attrs['file']
def UberClique(self):
'''Returns the uberclique that should be used for messages originating in
a given node. If the node itself has its uberclique set, that is what we
use, otherwise we search upwards until we find one. If we do not find one
even at the root node, we set the root node's uberclique to a new
uberclique instance.
'''
node = self
while not node.uberclique and node.parent:
node = node.parent
if not node.uberclique:
node.uberclique = clique.UberClique()
return node.uberclique
def IsTranslateable(self):
'''Returns false if the node has contents that should not be translated,
otherwise returns true (even if the node has no contents).
'''
if not 'translateable' in self.attrs:
return True
else:
return self.attrs['translateable'] == 'true'
def IsAccessibilityWithNoUI(self):
'''Returns true if the node is marked as an accessibility label and the
message isn't shown in the UI. Otherwise returns false. This label is
used to determine if the text requires screenshots.'''
if not 'is_accessibility_with_no_ui' in self.attrs:
return False
else:
return self.attrs['is_accessibility_with_no_ui'] == 'true'
def GetNodeById(self, id):
'''Returns the node in the subtree parented by this node that has a 'name'
attribute matching 'id'. Returns None if no such node is found.
'''
for node in self:
if 'name' in node.attrs and node.attrs['name'] == id:
return node
return None
def GetChildrenOfType(self, type):
'''Returns a list of all subnodes (recursing to all leaves) of this node
that are of the indicated type (or tuple of types).
Args:
type: A type you could use with isinstance().
Return:
A list, possibly empty.
'''
return [child for child in self if isinstance(child, type)]
def GetTextualIds(self):
'''Returns a list of the textual ids of this node.
'''
if 'name' in self.attrs:
return [self.attrs['name']]
return []
@classmethod
def EvaluateExpression(cls, expr, defs, target_platform, extra_variables={}):
'''Worker for EvaluateCondition (below) and conditions in XTB files.'''
if expr in cls.eval_expr_cache:
code, variables_in_expr = cls.eval_expr_cache[expr]
else:
# Get a list of all variable and method names used in the expression.
syntax_tree = ast.parse(expr, mode='eval')
variables_in_expr = [node.id for node in ast.walk(syntax_tree) if
isinstance(node, ast.Name) and node.id not in ('True', 'False')]
code = compile(syntax_tree, filename='<string>', mode='eval')
cls.eval_expr_cache[expr] = code, variables_in_expr
# Set values only for variables that are needed to eval the expression.
variable_map = {}
for name in variables_in_expr:
if name == 'os':
value = target_platform
elif name == 'defs':
value = defs
elif name == 'is_linux':
value = target_platform.startswith('linux')
elif name == 'is_macosx':
value = target_platform == 'darwin'
elif name == 'is_win':
value = target_platform in ('cygwin', 'win32')
elif name == 'is_android':
value = target_platform == 'android'
elif name == 'is_ios':
value = target_platform == 'ios'
elif name == 'is_fuchsia':
value = target_platform == 'fuchsia'
elif name == 'is_bsd':
value = 'bsd' in target_platform
elif name == 'is_posix':
value = (target_platform.startswith('linux')
or target_platform in ('darwin', 'sunos5', 'android', 'ios')
or 'bsd' in target_platform)
elif name == 'pp_ifdef':
def pp_ifdef(symbol):
return symbol in defs
value = pp_ifdef
elif name == 'pp_if':
def pp_if(symbol):
return defs.get(symbol, False)
value = pp_if
elif name in defs:
value = defs[name]
elif name in extra_variables:
value = extra_variables[name]
else:
# Undefined variables default to False.
value = False
variable_map[name] = value
eval_result = eval(code, {}, variable_map)
assert isinstance(eval_result, bool)
return eval_result
def EvaluateCondition(self, expr):
'''Returns true if and only if the Python expression 'expr' evaluates
to true.
The expression is given a few local variables:
- 'lang' is the language currently being output
(the 'lang' attribute of the <output> element).
- 'context' is the current output context
(the 'context' attribute of the <output> element).
- 'defs' is a map of C preprocessor-style symbol names to their values.
- 'os' is the current platform (likely 'linux2', 'win32' or 'darwin').
- 'pp_ifdef(symbol)' is a shorthand for "symbol in defs".
- 'pp_if(symbol)' is a shorthand for "symbol in defs and defs[symbol]".
- 'is_linux', 'is_macosx', 'is_win', 'is_posix' are true if 'os'
matches the given platform.
'''
root = self.GetRoot()
lang = getattr(root, 'output_language', '')
context = getattr(root, 'output_context', '')
defs = getattr(root, 'defines', {})
target_platform = getattr(root, 'target_platform', '')
extra_variables = {
'lang': lang,
'context': context,
}
return Node.EvaluateExpression(
expr, defs, target_platform, extra_variables)
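  # Illustrative only (not part of the original grit source): a typical
  # <if> expression handled by EvaluateCondition might be
  #   "is_win or (is_posix and lang == 'fr')"
  # EvaluateExpression compiles it once, caches it in eval_expr_cache, and
  # binds only the names the expression references ('is_win', 'is_posix',
  # 'lang') before evaluating it to a bool.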
def OnlyTheseTranslations(self, languages):
'''Turns off loading of translations for languages not in the provided list.
Attrs:
languages: ['fr', 'zh_cn']
'''
for node in self:
if (hasattr(node, 'IsTranslation') and
node.IsTranslation() and
node.GetLang() not in languages):
node.DisableLoading()
def FindBooleanAttribute(self, attr, default, skip_self):
'''Searches all ancestors of the current node for the nearest enclosing
definition of the given boolean attribute.
Args:
attr: 'fallback_to_english'
default: What to return if no node defines the attribute.
skip_self: Don't check the current node, only its parents.
'''
p = self.parent if skip_self else self
while p:
value = p.attrs.get(attr, 'default').lower()
if value != 'default':
return (value == 'true')
p = p.parent
return default
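  # Illustrative only (not in the original source): with an enclosing
  # <release allow_pseudo="false">, FindBooleanAttribute('allow_pseudo',
  # default=True, skip_self=True) walks up the parent chain, finds the
  # explicit "false" and returns False. PseudoIsAllowed and
  # ShouldFallbackToEnglish below are thin wrappers around this search.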
def PseudoIsAllowed(self):
'''Returns true if this node is allowed to use pseudo-translations. This
is true by default, unless this node is within a <release> node that has
the allow_pseudo attribute set to false.
'''
return self.FindBooleanAttribute('allow_pseudo',
default=True, skip_self=True)
def ShouldFallbackToEnglish(self):
'''Returns true iff this node should fall back to English when
pseudotranslations are disabled and no translation is available for a
given message.
'''
return self.FindBooleanAttribute('fallback_to_english',
default=False, skip_self=True)
def AllowlistMarkedAsSkip(self):
'''Returns true if the node is marked to be skipped in the output by a
allowlist.
'''
return self._allowlist_marked_as_skip
def SetAllowlistMarkedAsSkip(self, mark_skipped):
'''Sets AllowlistMarkedAsSkip.
'''
self._allowlist_marked_as_skip = mark_skipped
def ExpandVariables(self):
'''Whether we need to expand variables on a given node.'''
return False
def IsResourceMapSource(self):
'''Whether this node is a resource map source.'''
return False
def CompressDataIfNeeded(self, data):
'''Compress data using the format specified in the compress attribute.
Args:
      data: The data to be compressed.
Returns:
The data in gzipped or brotli compressed format. If the format is
unspecified then this returns the data uncompressed.
'''
compress = self.attrs.get('compress')
assert not (
compress != 'default' and compress != 'false' and
self.attrs.get('file').endswith(self._COMPRESS_DISALLOWED_EXTENSIONS)
), 'Disallowed |compress| attribute found for %s' % self.attrs.get('name')
# Compress JS, HTML, CSS and SVG files by default (gzip), unless |compress|
# is explicitly specified.
compress_by_default = (compress == 'default'
and self.attrs.get('file').endswith(
self._COMPRESS_BY_DEFAULT_EXTENSIONS))
if compress == 'gzip' or compress_by_default:
# We only use rsyncable compression on Linux.
# We exclude ChromeOS since ChromeOS bots are Linux based but do not have
# the --rsyncable option built in for gzip. See crbug.com/617950.
if sys.platform == 'linux2' and 'chromeos' not in self.GetRoot().defines:
return grit.format.gzip_string.GzipStringRsyncable(data)
return grit.format.gzip_string.GzipString(data)
if compress == 'brotli':
# The length of the uncompressed data as 8 bytes little-endian.
size_bytes = struct.pack("<q", len(data))
data = brotli_util.BrotliCompress(data)
# BROTLI_CONST is prepended to brotli decompressed data in order to
# easily check if a resource has been brotli compressed.
# The length of the uncompressed data is also appended to the start,
# truncated to 6 bytes, little-endian. size_bytes is 8 bytes,
# need to truncate further to 6.
formatter = b'%ds %dx %ds' % (6, 2, len(size_bytes) - 8)
return (constants.BROTLI_CONST +
b''.join(struct.unpack(formatter, size_bytes)) +
data)
if compress == 'false' or compress == 'default':
return data
raise Exception('Invalid value for compression')
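  # Sketch of the brotli framing produced above (added note, not from the
  # original source): the emitted resource is
  #   BROTLI_CONST + <uncompressed size, 6 bytes little-endian> + <brotli data>
  # struct.pack('<q', ...) yields 8 little-endian bytes; the '6s 2x 0s'
  # formatter keeps the first 6 and skips the last 2, so consumers can
  # detect and pre-size brotli-compressed resources.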
class ContentNode(Node):
'''Convenience baseclass for nodes that can have content.'''
def _ContentType(self):
return self._CONTENT_TYPE_MIXED
|
|
from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_http_client,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
error_to_compat_str,
ExtractorError,
limit_length,
sanitized_Request,
urlencode_postdata,
get_element_by_id,
clean_html,
)
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://
(?:\w+\.)?facebook\.com/
(?:[^#]*?\#!/)?
(?:
(?:
video/video\.php|
photo\.php|
video\.php|
video/embed|
story\.php
)\?(?:.*?)(?:v|video_id|story_fbid)=|
[^/]+/videos/(?:[^/]+/)?|
[^/]+/posts/|
groups/[^/]+/permalink/
)|
facebook:
)
(?P<id>[0-9]+)
'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_CHROME_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'
_VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
'uploader': 'Tennis on Facebook',
}
}, {
'note': 'Video without discernible title',
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 'Facebook video #274175099429670',
'uploader': 'Asif Nawab Butt',
},
'expected_warnings': [
'title'
]
}, {
'note': 'Video with DASH manifest',
'url': 'https://www.facebook.com/video.php?v=957955867617029',
'md5': '54706e4db4f5ad58fbad82dde1f1213f',
'info_dict': {
'id': '957955867617029',
'ext': 'mp4',
'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
'uploader': 'Demy de Zeeuw',
},
}, {
'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
'info_dict': {
'id': '544765982287235',
'ext': 'mp4',
'title': '"What are you doing running in the snow?"',
'uploader': 'FailArmy',
}
}, {
'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
'info_dict': {
'id': '1035862816472149',
'ext': 'mp4',
'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog',
'uploader': 'S. Saint',
},
}, {
'note': 'swf params escaped',
'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
'md5': '97ba073838964d12c70566e0085c2b91',
'info_dict': {
'id': '10153664894881749',
'ext': 'mp4',
'title': 'Facebook video #10153664894881749',
},
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
'only_matching': True,
}, {
'url': 'facebook:544765982287235',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/groups/164828000315060/permalink/764967300301124/',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https://www\.facebook\.com/video/embed.+?)\1', webpage)
if mobj is not None:
return mobj.group('url')
# Facebook API embed
# see https://developers.facebook.com/docs/plugins/embedded-video-player
mobj = re.search(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+
data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''', webpage)
if mobj is not None:
return mobj.group('url')
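    # Illustrative only (not from the original extractor): _extract_url above
    # matches either a plain iframe embed such as
    #   <iframe src="https://www.facebook.com/video/embed?video_id=123"></iframe>
    # or a JS SDK style embed such as
    #   <div class="fb-video" data-href="https://www.facebook.com/user/videos/123/"></div>
    # and returns the embedded URL for this extractor to process.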
def _login(self):
(useremail, password) = self._get_login_info()
if useremail is None:
return
login_page_req = sanitized_Request(self._LOGIN_URL)
self._set_cookie('facebook.com', 'locale', 'en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
lsd = self._search_regex(
r'<input type="hidden" name="lsd" value="([^"]*)"',
login_page, 'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')
login_form = {
'email': useremail,
'pass': password,
'lsd': lsd,
'lgnrnd': lgnrnd,
'next': 'http://facebook.com/home.php',
'default_persistent': '0',
'legacy_return': '1',
'timezone': '-60',
'trynum': '1',
}
request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
error = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
login_results, 'login error', default=None, group='error')
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
fb_dtsg = self._search_regex(
r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
h = self._search_regex(
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)
if not fb_dtsg or not h:
return
check_form = {
'fb_dtsg': fb_dtsg,
'h': h,
'name_action_selected': 'dont_save',
}
check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
return
def _real_initialize(self):
self._login()
def _extract_from_url(self, url, video_id, fatal_if_no_video=True):
req = sanitized_Request(url)
req.add_header('User-Agent', self._CHROME_USER_AGENT)
webpage = self._download_webpage(req, video_id)
video_data = None
BEFORE = '{swf.addParam(param[0], param[1]);});'
AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
m = re.search(re.escape(BEFORE) + '(?:\n|\\\\n)(.*?)' + re.escape(AFTER), webpage)
if m:
swf_params = m.group(1).replace('\\\\', '\\').replace('\\"', '"')
data = dict(json.loads(swf_params))
params_raw = compat_urllib_parse_unquote(data['params'])
video_data = json.loads(params_raw)['video_data']
def video_data_list2dict(video_data):
ret = {}
for item in video_data:
format_id = item['stream_type']
ret.setdefault(format_id, []).append(item)
return ret
if not video_data:
server_js_data = self._parse_json(self._search_regex(
r'handleServerJS\(({.+})\);', webpage, 'server js data', default='{}'), video_id)
for item in server_js_data.get('instances', []):
if item[1][0] == 'VideoConfig':
video_data = video_data_list2dict(item[2][0]['videoData'])
break
if not video_data:
if not fatal_if_no_video:
return webpage, False
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
'The video is not available, Facebook said: "%s"' % m_msg.group(1),
expected=True)
else:
raise ExtractorError('Cannot parse data')
formats = []
for format_id, f in video_data.items():
if f and isinstance(f, dict):
f = [f]
if not f or not isinstance(f, list):
continue
for quality in ('sd', 'hd'):
for src_type in ('src', 'src_no_ratelimit'):
src = f[0].get('%s_%s' % (quality, src_type))
if src:
preference = -10 if format_id == 'progressive' else 0
if quality == 'hd':
preference += 5
formats.append({
'format_id': '%s_%s_%s' % (format_id, quality, src_type),
'url': src,
'preference': preference,
})
dash_manifest = f[0].get('dash_manifest')
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
if not formats:
raise ExtractorError('Cannot find video formats')
self._sort_formats(formats)
video_title = self._html_search_regex(
r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
default=None)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', default=None)
video_title = limit_length(video_title, 80)
if not video_title:
video_title = 'Facebook video #%s' % video_id
uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))
info_dict = {
'id': video_id,
'title': video_title,
'formats': formats,
'uploader': uploader,
}
return webpage, info_dict
def _real_extract(self, url):
video_id = self._match_id(url)
real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
webpage, info_dict = self._extract_from_url(real_url, video_id, fatal_if_no_video=False)
if info_dict:
return info_dict
if '/posts/' in url:
entries = [
self.url_result('facebook:%s' % vid, FacebookIE.ie_key())
for vid in self._parse_json(
self._search_regex(
r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
webpage, 'video ids', group='ids'),
video_id)]
return self.playlist_result(entries, video_id)
else:
_, info_dict = self._extract_from_url(
self._VIDEO_PAGE_TEMPLATE % video_id,
video_id, fatal_if_no_video=True)
return info_dict
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import collections
import compile_flags
import json
import logging
import multiprocessing
from multiprocessing.pool import ThreadPool
import os
import re
import subprocess
import sys
import unittest
import tempfile
from kudu_util import init_logging
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
CLANG_TIDY_DIFF = os.path.join(
ROOT, "thirdparty/installed/uninstrumented/share/clang/clang-tidy-diff.py")
CLANG_TIDY = os.path.join(
ROOT, "thirdparty/clang-toolchain/bin/clang-tidy")
GERRIT_USER = 'tidybot'
GERRIT_PASSWORD = os.getenv('TIDYBOT_PASSWORD')
GERRIT_URL = "https://gerrit.cloudera.org"
def run_tidy(sha="HEAD", is_rev_range=False):
diff_cmdline = ["git", "diff" if is_rev_range else "show", sha]
# Figure out which paths changed in the given diff.
changed_paths = subprocess.check_output(diff_cmdline + ["--name-only", "--pretty=format:"]).splitlines()
changed_paths = [p for p in changed_paths if p]
# Produce a separate diff for each file and run clang-tidy-diff on it
# in parallel.
def tidy_on_path(path):
patch_file = tempfile.NamedTemporaryFile()
cmd = diff_cmdline + [
"--src-prefix=%s/" % ROOT,
"--dst-prefix=%s/" % ROOT,
"--",
path]
subprocess.check_call(cmd, stdout=patch_file, cwd=ROOT)
cmdline = [CLANG_TIDY_DIFF,
"-clang-tidy-binary", CLANG_TIDY,
"-p0",
"--",
"-DCLANG_TIDY"] + compile_flags.get_flags()
return subprocess.check_output(
cmdline,
stdin=file(patch_file.name),
cwd=ROOT)
pool = ThreadPool(multiprocessing.cpu_count())
try:
return "".join(pool.imap(tidy_on_path, changed_paths))
except KeyboardInterrupt as ki:
sys.exit(1)
finally:
pool.terminate()
pool.join()
def split_warnings(clang_output):
accumulated = ""
for l in clang_output.splitlines():
if l == "" or l == "No relevant changes found.":
continue
if l.startswith(ROOT) and re.search(r'(warning|error): ', l):
if accumulated:
yield accumulated
accumulated = ""
accumulated += l + "\n"
if accumulated:
yield accumulated
def parse_clang_output(clang_output):
ret = []
for w in split_warnings(clang_output):
m = re.match(r"^(.+?):(\d+):\d+: ((?:warning|error): .+)$", w, re.MULTILINE | re.DOTALL)
if not m:
raise Exception("bad warning: " + w)
path, line, warning = m.groups()
ret.append(dict(
path=os.path.relpath(path, ROOT),
line=int(line),
warning=warning.strip()))
return ret
def create_gerrit_json_obj(parsed_warnings):
comments = collections.defaultdict(lambda: [])
for warning in parsed_warnings:
comments[warning['path']].append({
'line': warning['line'],
'message': warning['warning']
})
return {"comments": comments,
"notify": "OWNER",
"omit_duplicate_comments": "true"}
def get_gerrit_revision_url(git_ref):
sha = subprocess.check_output(["git", "rev-parse", git_ref]).strip()
commit_msg = subprocess.check_output(
["git", "show", sha])
matches = re.findall(r'^\s+Change-Id: (I.+)$', commit_msg, re.MULTILINE)
if not matches:
raise Exception("Could not find gerrit Change-Id for commit %s" % sha)
if len(matches) != 1:
raise Exception("Found multiple gerrit Change-Ids for commit %s" % sha)
change_id = matches[0]
return "%s/a/changes/%s/revisions/%s" % (GERRIT_URL, change_id, sha)
def post_comments(revision_url_base, gerrit_json_obj):
import requests
r = requests.post(revision_url_base + "/review",
auth=(GERRIT_USER, GERRIT_PASSWORD),
data=json.dumps(gerrit_json_obj),
headers={'Content-Type': 'application/json'})
print "Response:"
print r.headers
print r.status_code
print r.text
class TestClangTidyGerrit(unittest.TestCase):
TEST_INPUT = \
"""
No relevant changes found.
/home/todd/git/kudu/src/kudu/integration-tests/tablet_history_gc-itest.cc:579:55: warning: some warning [warning-name]
some example line of code
/home/todd/git/kudu/foo/../src/kudu/blah.cc:123:55: error: some error
blah blah
No relevant changes found.
"""
def test_parse_clang_output(self):
global ROOT
save_root = ROOT
try:
ROOT = "/home/todd/git/kudu"
parsed = parse_clang_output(self.TEST_INPUT)
finally:
ROOT = save_root
self.assertEqual(2, len(parsed))
self.assertEqual("src/kudu/integration-tests/tablet_history_gc-itest.cc", parsed[0]['path'])
self.assertEqual(579, parsed[0]['line'])
self.assertEqual("warning: some warning [warning-name]\n" +
" some example line of code",
parsed[0]['warning'])
self.assertEqual("src/kudu/blah.cc", parsed[1]['path'])
if __name__ == "__main__":
# Basic setup and argument parsing.
init_logging()
parser = argparse.ArgumentParser(
description="Run clang-tidy on a patch, optionally posting warnings as comments to gerrit")
parser.add_argument("-n", "--no-gerrit", action="store_true",
help="Whether to run locally i.e. (no interaction with gerrit)")
parser.add_argument("--rev-range", action="store_true",
default=False,
help="Whether the revision specifies the 'rev..' range")
parser.add_argument('rev', help="The git revision (or range of revisions) to process")
args = parser.parse_args()
if args.rev_range and not args.no_gerrit:
print >>sys.stderr, "--rev-range works only with --no-gerrit"
sys.exit(1)
# Find the gerrit revision URL, if applicable.
if not args.no_gerrit:
revision_url = get_gerrit_revision_url(args.rev)
print revision_url
# Run clang-tidy and parse the output.
clang_output = run_tidy(args.rev, args.rev_range)
logging.info("Clang output")
logging.info(clang_output)
if args.no_gerrit:
print >>sys.stderr, "Skipping gerrit"
sys.exit(0)
logging.info("=" * 80)
parsed = parse_clang_output(clang_output)
if not parsed:
print >>sys.stderr, "No warnings"
sys.exit(0)
print "Parsed clang warnings:"
print json.dumps(parsed, indent=4)
# Post the output as comments to the gerrit URL.
gerrit_json_obj = create_gerrit_json_obj(parsed)
print "Will post to gerrit:"
print json.dumps(gerrit_json_obj, indent=4)
post_comments(revision_url, gerrit_json_obj)
|
|
"""Module-level functions"""
import os
import re
from tempfile import mkdtemp
from typing import Optional
import pyfaidx
from appdirs import user_config_dir
from genomepy.annotation import Annotation
from genomepy.config import config
from genomepy.exceptions import GenomeDownloadError
from genomepy.files import (
_apply_fasta_regex_func,
bgzip_and_name,
glob_ext_files,
gzip_and_name,
read_readme,
update_readme,
)
from genomepy.genome import Genome
from genomepy.online import check_url
from genomepy.plugins import get_active_plugins
from genomepy.providers import download_assembly_report, online_providers
from genomepy.utils import (
cleanpath,
get_genomes_dir,
get_localname,
mkdir_p,
rm_rf,
safe,
try_except_pass,
)
def head_annotations(name: str, provider=None, n: int = 2):
"""
Quickly inspect the metadata of each available annotation for the specified genome.
For UCSC, up to 4 gene annotation styles are available:
"ncbiRefSeq", "refGene", "ensGene", "knownGene" (respectively).
For NCBI, the chromosome names are not yet sanitized.
Parameters
----------
name: str
genome name
provider: str, optional
only search the specified provider for the genome name
n: int, optional
number of lines to show
"""
for p in online_providers(provider):
if name in p.genomes:
tmp_dir = mkdtemp()
p.head_annotation(name, genomes_dir=tmp_dir, n=n)
rm_rf(tmp_dir)
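# Hypothetical usage sketch (not part of the original module):
#   head_annotations("hg38", provider="UCSC", n=2)
# downloads just enough of each available annotation into a temporary
# directory to show its first two lines, then removes the directory again.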
def list_available_genomes(provider=None) -> list:
"""
List all available genomes.
Parameters
----------
provider : str, optional
List genomes from specific provider. Genomes from all
providers will be returned if not specified.
Yields
------
list
tuples with genome name, provider and metadata
"""
for p in online_providers(provider):
for row in p.list_available_genomes():
yield list(row[:1]) + [p.name] + list(row[1:])
def list_installed_genomes(genomes_dir: str = None) -> list:
"""
List all locally available genomes.
Parameters
----------
genomes_dir : str, optional
Directory with genomes installed by genomepy.
Returns
-------
list
genome names
"""
genomes_dir = get_genomes_dir(genomes_dir, check_exist=False)
if os.path.exists(genomes_dir):
return [
subdir
for subdir in os.listdir(genomes_dir)
if _is_genome_dir(os.path.join(genomes_dir, subdir))
]
return []
def install_genome(
name: str,
provider: Optional[str] = None,
genomes_dir: Optional[str] = None,
localname: Optional[str] = None,
mask: Optional[str] = "soft",
keep_alt: Optional[bool] = False,
regex: Optional[str] = None,
invert_match: Optional[bool] = False,
bgzip: Optional[bool] = None, # None -> check config. False -> dont check.
annotation: Optional[bool] = False,
only_annotation: Optional[bool] = False,
skip_matching: Optional[bool] = False,
skip_filter: Optional[bool] = False,
threads: Optional[int] = 1,
force: Optional[bool] = False,
**kwargs: Optional[dict],
) -> Genome:
"""
Install a genome (& gene annotation).
Parameters
----------
name : str
Genome name
provider : str , optional
        Provider name. Will try Ensembl, UCSC and NCBI (in that order) if not specified.
genomes_dir : str , optional
Where to create the output folder.
localname : str , optional
Custom name for this genome.
mask : str , optional
Genome masking of repetitive sequences. Options: hard/soft/none, default is soft.
keep_alt : bool , optional
Some genomes contain alternative regions. These regions cause issues with
sequence alignment, as they are inherently duplications of the consensus regions.
Set to true to keep these alternative regions.
regex : str , optional
Regular expression to select specific chromosome / scaffold names.
invert_match : bool , optional
Set to True to select all chromosomes that *don't* match the regex.
bgzip : bool , optional
If set to True the genome FASTA file will be compressed using bgzip,
and gene annotation will be compressed with gzip.
threads : int , optional
Build genome index using multithreading (if supported). Default: lowest of 8/all threads.
force : bool , optional
Set to True to overwrite existing files.
annotation : bool , optional
If set to True, download gene annotation in BED and GTF format.
only_annotation : bool , optional
If set to True, only download the gene annotation files.
skip_matching : bool , optional
If set to True, contigs in the annotation not matching
those in the genome will not be corrected.
skip_filter : bool , optional
If set to True, the gene annotations will not be filtered to match the genome contigs.
kwargs : dict , optional
Provider specific options.
toplevel : bool , optional
Ensembl only: Always download the toplevel genome. Ignores potential primary assembly.
version : int , optional
Ensembl only: Specify release version. Default is latest.
to_annotation : text , optional
URL only: direct link to annotation file.
Required if this is not the same directory as the fasta.
Returns
-------
Genome
Genome class with the installed genome
"""
name = safe(name)
localname = get_localname(name, localname)
genomes_dir = get_genomes_dir(genomes_dir, check_exist=False)
out_dir = os.path.join(genomes_dir, localname)
genome_file = os.path.join(out_dir, f"{localname}.fa")
provider = _provider_selection(name, localname, genomes_dir, provider)
# check which files need to be downloaded
genome_found = _is_genome_dir(out_dir)
download_genome = (
genome_found is False or force is True
) and only_annotation is False
annotation_found = bool(glob_ext_files(out_dir, "annotation.gtf")) and bool(
glob_ext_files(out_dir, "annotation.bed")
)
download_annotation = (annotation_found is False or force is True) and any(
[
annotation,
only_annotation,
skip_matching,
skip_filter,
kwargs.get("to_annotation"),
kwargs.get("ucsc_annotation_type"),
]
)
genome = None
genome_downloaded = False
if download_genome:
if force:
_delete_extensions(out_dir, ["fa", "fai"])
provider.download_genome(
name,
genomes_dir,
mask=mask,
localname=localname,
**kwargs,
)
genome_found = True
genome_downloaded = True
# Filter genome
_filter_genome(genome_file, regex, invert_match, keep_alt)
# Generates a Fasta object and the genome index, gaps and sizes files
genome = Genome(localname, genomes_dir=genomes_dir)
# Download the NCBI assembly report
asm_report = os.path.join(out_dir, "assembly_report.txt")
asm_acc = genome.assembly_accession
if not os.path.exists(asm_report) and asm_acc != "na":
download_assembly_report(asm_acc, asm_report)
# Export installed genome(s)
generate_env(genomes_dir=genomes_dir)
annotation_downloaded = False
if download_annotation:
if force:
_delete_extensions(out_dir, ["annotation.gtf", "annotation.bed"])
provider.download_annotation(name, genomes_dir, localname=localname, **kwargs)
annotation_downloaded = bool(
glob_ext_files(out_dir, "annotation.gtf")
) and bool(glob_ext_files(out_dir, "annotation.bed"))
if annotation_downloaded:
annotation = Annotation(localname, genomes_dir=genomes_dir)
if genome_found and not (skip_matching and skip_filter):
annotation.sanitize(not skip_matching, not skip_filter, True)
# Run active plugins (also if the genome was downloaded earlier)
if genome_found:
genome = genome if genome else Genome(localname, genomes_dir=genomes_dir)
for plugin in get_active_plugins():
plugin.after_genome_download(genome, threads, force)
# zip files downloaded now
if bgzip is True or (bgzip is None and config.get("bgzip")):
if genome_downloaded:
bgzip_and_name(genome.filename)
if annotation_downloaded:
gzip_and_name(annotation.annotation_gtf_file)
gzip_and_name(annotation.annotation_bed_file)
return genome
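# Hypothetical usage sketch (not part of the original module); the genome name
# and provider are placeholders:
#   genome = install_genome("GRCz11", provider="Ensembl", annotation=True)
#   print(genome.filename)  # path to the installed FASTA
# The call downloads and filters the FASTA (unless only_annotation is set),
# fetches the assembly report and annotation, runs any active plugins, and
# optionally bgzips the results before returning the Genome object.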
def generate_env(fname: str = "exports.txt", genomes_dir: str = None):
"""
Generate file with exports.
By default the export file generated is .config/genomepy/exports.txt.
An alternative file name or file path is accepted too.
Parameters
----------
fname: str, optional
Absolute path or name of the output file.
genomes_dir: str, optional
Directory with installed genomes to export.
"""
fname1 = os.path.expanduser(os.path.expandvars(fname))
fname2 = os.path.join(user_config_dir("genomepy"), fname)
fname = fname1 if os.path.isabs(fname1) else fname2
mkdir_p(os.path.dirname(fname))
with open(fname, "w") as fout:
for env in _generate_exports(genomes_dir):
fout.write(f"{env}\n")
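# Example contents of the exports file written above (illustrative; the genome
# names and paths are placeholders):
#   export GRCZ11=/home/user/.local/share/genomes/GRCz11/GRCz11.fa
#   export HG38=/home/user/.local/share/genomes/hg38/hg38.fa
# Each line comes from _generate_exports below, which upper-cases the genome
# name and replaces non-word characters with underscores.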
def _generate_exports(genomes_dir: str = None):
"""Print export commands for setting environment variables."""
env = []
for name in list_installed_genomes(genomes_dir):
try:
g = Genome(name, genomes_dir, build_index=False)
env_name = re.sub(r"[^\w]+", "_", name).upper()
env.append(f"export {env_name}={g.filename}")
except (
pyfaidx.FastaIndexingError,
pyfaidx.IndexNotFoundError,
FileNotFoundError,
):
pass
return env
def _lazy_provider_selection(name, provider=None):
"""return the first PROVIDER which has genome NAME"""
providers = []
for p in online_providers(provider):
providers.append(p.name)
if name in p.genomes:
return p
if p.name == "URL" and try_except_pass(ValueError, check_url, name):
return p
if p.name == "Local" and os.path.exists(cleanpath(name)):
return p
raise GenomeDownloadError(f"{name} not found on {', '.join(providers)}.")
def _provider_selection(name, localname, genomes_dir, provider=None):
"""
Return a provider object
First tries to return a specified provider,
Second tries to return the provider from the README
Third tries to return the first provider which has the genome (Ensembl>UCSC>NCBI)
"""
readme = os.path.join(genomes_dir, localname, "README.txt")
if provider is None and os.path.exists(readme):
m, _ = read_readme(readme)
p = m["provider"].lower()
if p in ["ensembl", "ucsc", "ncbi"]:
provider = p
return _lazy_provider_selection(name, provider)
def _filter_genome(
genome_file: str,
regex: Optional[str] = None,
invert_match: Optional[bool] = False,
keep_alt: Optional[bool] = False,
):
"""
Combine regex filters & document filtered contigs
keep_alt : bool , optional
Set to true to keep these alternative regions.
regex : str , optional
Regular expression to select specific chromosome / scaffold names.
invert_match : bool , optional
Set to True to select all chromosomes that don't match the regex.
"""
if keep_alt is True and regex is None:
return
regex_func = _get_fasta_regex_func(regex, invert_match, keep_alt)
excluded_contigs = _apply_fasta_regex_func(genome_file, regex_func)
# document
regex_line = "regex: "
if keep_alt is False:
regex_line += "'alt' (inverted match)" + (" and " if regex else "")
if regex:
regex_line += f"'{regex}'" + (" (inverted match)" if invert_match else "")
lines = ["", regex_line, ""] + (
[
"The following contigs were filtered out of the genome:",
f"{', '.join(excluded_contigs)}",
]
if excluded_contigs
else ["No contigs were removed."]
)
readme = os.path.join(os.path.dirname(genome_file), "README.txt")
update_readme(readme, extra_lines=lines)
def _get_fasta_regex_func(
regex: Optional[str] = None,
invert_match: Optional[bool] = False,
keep_alt: Optional[bool] = False,
):
"""
returns a regex function that accepts a contig header and returns a bool to keep the contig or not.
"""
# define filter functions
if keep_alt is False:
alt_pattern = re.compile("(alt)", re.I) # case insensitive
def alt_keep(header):
return not bool(alt_pattern.search(header))
keep = alt_keep # rename in case there is only 1 filter function
if regex is not None:
re_pattern = re.compile(regex)
def re_keep(header):
return bool(re_pattern.search(header)) is not invert_match
keep = re_keep # rename in case there is only 1 filter function
# combine filter functions?
if regex is not None and keep_alt is False:
def keep(header):
return re_keep(header) and alt_keep(header)
return keep # noqa: IDE is confused
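# Illustrative behaviour of the returned filter (added note, not from the
# original module): with keep_alt=False and regex="^chr", keep() drops any
# header containing "alt" (case-insensitive) and keeps only headers matching
# "^chr"; invert_match=True would flip the regex test but not the alt filter.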
def _delete_extensions(directory: str, exts: list):
"""remove (gzipped) files in a directory matching any given extension"""
for ext in exts:
[rm_rf(f) for f in glob_ext_files(directory, ext)]
def _is_genome_dir(dirname):
"""
Check if a directory contains a fasta file of the same name
Parameters
----------
dirname : str
Directory name
Returns
------
bool
"""
genome_file = os.path.join(dirname, f"{os.path.basename(dirname)}.fa")
return os.path.exists(genome_file) or os.path.exists(f"{genome_file}.gz")
|
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
import math
from pychron.canvas.canvas2D.scene.primitives.base import Connectable
from pychron.canvas.canvas2D.scene.primitives.connections import Tee, Fork, Elbow
from pychron.canvas.canvas2D.scene.primitives.primitives import Rectangle, Bordered, BorderLine
def rounded_rect(gc, x, y, width, height, corner_radius):
with gc:
gc.translate_ctm(x, y) # draw a rounded rectangle
x = y = 0
gc.begin_path()
hw = width * 0.5
hh = height * 0.5
if hw < corner_radius:
corner_radius = hw * 0.5
elif hh < corner_radius:
corner_radius = hh * 0.5
gc.move_to(x + corner_radius, y)
gc.arc_to(x + width, y, x + width, y + corner_radius, corner_radius)
gc.arc_to(x + width, y + height, x + width - corner_radius, y + height, corner_radius)
gc.arc_to(x, y + height, x, y, corner_radius)
gc.arc_to(x, y, x + width + corner_radius, y, corner_radius)
gc.draw_path()
class RoundedRectangle(Rectangle, Connectable, Bordered):
corner_radius = 8.0
display_name = None
fill = True
use_border_gaps = True
def get_tooltip_text(self):
return 'Stage={}\nVolume={}'.format(self.name, self.volume)
def _render(self, gc):
corner_radius = self.corner_radius
with gc:
width, height = self.get_wh()
x, y = self.get_xy()
if self.fill:
rounded_rect(gc, x, y, width, height, corner_radius)
self._render_border(gc, x, y, width, height)
gc.set_fill_color(self._convert_color(self.name_color))
if self.display_name:
self._render_textbox(gc, x, y, width, height,
self.display_name)
elif not self.display_name == '':
self._render_name(gc, x, y, width, height)
def _render_border(self, gc, x, y, width, height):
if self.use_border:
corner_radius = self.corner_radius
with gc:
gc.set_line_width(self.border_width)
if self.fill:
c = self._get_border_color()
else:
c = self.default_color
c = self._convert_color(c)
gc.set_fill_color((0, 0, 0, 0))
gc.set_stroke_color(c)
rounded_rect(gc, x, y, width, height, corner_radius)
if self.use_border_gaps:
# from pychron.canvas.canvas2D.scene.primitives.connections import Fork, Tee
with gc:
for t, c in self.connections:
with gc:
gc.set_line_width(self.border_width + 1)
if isinstance(c, Elbow):
p1, p2 = c.start_point, c.end_point
if p1.y < p2.y:
p1x, p1y = p1.get_xy()
gc.move_to(p1x - 5, y + height)
gc.line_to(p1x + 5, y + height)
else:
if c.corner == 'll':
p1x, p1y = p1.get_xy()
gc.move_to(p1x - 5, p1y)
gc.line_to(p1x + 5, p1y)
else:
p2x, p2y = p2.get_xy()
xx = x
if p1.x >= self.x:
xx = x + width
gc.move_to(xx, p2y - 5)
gc.line_to(xx, p2y + 5)
elif isinstance(c, BorderLine):
p1, p2 = c.start_point, c.end_point
p2x, p2y = p2.get_xy()
if p1.x == p2.x:
yy = y
if p1.y >= self.y:
if p1.y - self.y != 1:
yy = y + height
p1x, p1y = p1.get_xy()
gc.move_to(p1x - 5, yy)
gc.line_to(p1x + 5, yy)
else:
xx = x
if p1.x >= self.x:
xx = x + width
gc.move_to(xx, p2y - 5)
gc.line_to(xx, p2y + 5)
elif isinstance(c, Tee):
if t == 'mid':
yy = y if c.left.y < self.y else y + height
mx = c.get_midx()
gc.move_to(mx - 5, yy)
gc.line_to(mx + 5, yy)
else:
gc.set_line_width(self.border_width + 2)
# gc.set_stroke_color((1,0,0))
if t == 'left':
xx, yy = c.left.get_xy()
xx += 2.5
else:
xx, yy = c.right.get_xy()
gc.move_to(xx, yy - 5)
gc.line_to(xx, yy + 5)
elif isinstance(c, Fork):
yy = y if c.left.y < self.y else y + height
mx = c.get_midx()
gc.move_to(mx - 5, yy)
gc.line_to(mx + 5, yy)
gc.draw_path()
class Spectrometer(RoundedRectangle):
tag = 'spectrometer'
def __init__(self, *args, **kw):
super(Spectrometer, self).__init__(*args, **kw)
self.width = 10
self.height = 10
class Stage(RoundedRectangle):
tag = 'stage'
def __init__(self, *args, **kw):
        super(Stage, self).__init__(*args, **kw)
self.width = 10
self.height = 5
class CircleStage(Connectable, Bordered):
tag = 'cirle_stage'
def get_tooltip_text(self):
return 'Circle Stage={}\nVolume={}'.format(self.name, self.volume)
def _render(self, gc):
with gc:
width, height = self.get_wh()
x, y = self.get_xy()
gc.arc(x, y, width, 0, 360)
gc.draw_path()
self._render_border(gc, x, y, width)
gc.set_fill_color(self._convert_color(self.name_color))
if self.display_name:
self._render_textbox(gc, x, y, width, height,
self.display_name)
elif not self.display_name == '':
self._render_name(gc, x, y, width, height)
def _render_textbox(self, gc, x, y, w, h, txt):
tw, th, _, _ = gc.get_full_text_extent(txt)
x = x - tw / 2.
y = y - th / 2.
self._render_text(gc, txt, x, y)
def _render_border(self, gc, x, y, width):
gc.set_line_width(self.border_width)
with gc:
c = self._get_border_color()
gc.set_stroke_color(c)
gc.arc(x, y, width, 0, 2 * math.pi)
gc.stroke_path()
self._render_gaps(gc, x, y, width)
def _render_gaps(self, gc, cx, cy, r):
gc.set_line_width(self.border_width + 2)
def sgn(x):
return -1 if x < 0 else 1
def angle(x, y):
return math.pi / 2 - math.atan2(x, y)
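        # Added note (not in the original source): the loop below locates where
        # each connecting BorderLine crosses the circle so a gap can be drawn in
        # the border. With the endpoints translated to circle-centre coordinates
        # it applies the standard line/circle intersection formula:
        #   D = x1*y2 - x2*y1,  dr^2 = dx^2 + dy^2,
        #   x = (D*dy +/- sgn(dy)*dx*sqrt(r^2*dr^2 - D^2)) / dr^2
        # (and similarly for y); theta is the angle of the intersection on the
        # connection's side, and the arc from theta-dw to theta+dw is stroked
        # over the border to form the gap.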
with gc:
gc.set_stroke_color(self._convert_color(self.default_color))
for t, c in self.connections:
if isinstance(c, BorderLine):
dw = math.atan((c.width - c.border_width / 2) / r)
p1, p2 = c.start_point, c.end_point
p2x, p2y = p2.get_xy()
p1x, p1y = p1.get_xy()
if p1x == p2x and p1y == p2y:
continue
p2x = p2x - cx
p2y = p2y - cy
p1x = p1x - cx
p1y = p1y - cy
dx = p2x - p1x
dy = p2y - p1y
dr = (dx ** 2 + dy ** 2) ** 0.5
D = p1x * p2y - p2x * p1y
ss = (r ** 2 * dr ** 2 - D ** 2) ** 0.5
plus_x = D * dy + sgn(dy) * dx * ss
minus_x = D * dy - sgn(dy) * dx * ss
plus_y = (-D * dx + abs(dy) * ss)
minus_y = (-D * dx - abs(dy) * ss)
plus_x /= dr ** 2
plus_y /= dr ** 2
minus_x /= dr ** 2
minus_y /= dr ** 2
if p2y > p1y:
if p2x > p1x:
theta = angle(plus_x, plus_y)
else:
theta = angle(minus_x, minus_y)
else:
if p2x > p1x:
theta = angle(minus_x, minus_y)
else:
theta = angle(plus_x, plus_y)
gc.arc(cx, cy, r, theta - dw, theta + dw)
gc.stroke_path()
# ============= EOF =============================================
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9824")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9824")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a BeatleCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a BeatleCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
import itertools
import heapq
import collections
import operator
from functools import partial
from toolz.compatibility import (map, filter, filterfalse, zip, zip_longest,
iteritems)
__all__ = ('remove', 'accumulate', 'groupby', 'merge_sorted', 'interleave',
'unique', 'isiterable', 'isdistinct', 'take', 'drop', 'take_nth',
'first', 'second', 'nth', 'last', 'get', 'concat', 'concatv',
'mapcat', 'cons', 'interpose', 'frequencies', 'reduceby', 'iterate',
'sliding_window', 'partition', 'partition_all', 'count', 'pluck',
'join', 'tail', 'diff', 'topk')
def remove(predicate, seq):
""" Return those items of sequence for which predicate(item) is False
>>> def iseven(x):
... return x % 2 == 0
>>> list(remove(iseven, [1, 2, 3, 4]))
[1, 3]
"""
return filterfalse(predicate, seq)
def accumulate(binop, seq):
""" Repeatedly apply binary function to a sequence, accumulating results
>>> from operator import add, mul
>>> list(accumulate(add, [1, 2, 3, 4, 5]))
[1, 3, 6, 10, 15]
>>> list(accumulate(mul, [1, 2, 3, 4, 5]))
[1, 2, 6, 24, 120]
Accumulate is similar to ``reduce`` and is good for making functions like
cumulative sum:
>>> from functools import partial, reduce
>>> sum = partial(reduce, add)
>>> cumsum = partial(accumulate, add)
See Also:
itertools.accumulate : In standard itertools for Python 3.2+
"""
seq = iter(seq)
result = next(seq)
yield result
for elem in seq:
result = binop(result, elem)
yield result
def groupby(key, seq):
""" Group a collection by a key function
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
Non-callable keys imply grouping on a member.
>>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
... {'name': 'Bob', 'gender': 'M'},
... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
{'F': [{'gender': 'F', 'name': 'Alice'}],
'M': [{'gender': 'M', 'name': 'Bob'},
{'gender': 'M', 'name': 'Charlie'}]}
See Also:
countby
"""
if not callable(key):
key = getter(key)
d = collections.defaultdict(lambda: [].append)
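    # Added note (not in the original toolz source): each missing key maps to a
    # fresh list's bound .append, so d[key(item)](item) below appends without a
    # per-item attribute lookup; the underlying lists are recovered afterwards
    # through the bound method's __self__.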
for item in seq:
d[key(item)](item)
rv = {}
for k, v in iteritems(d):
rv[k] = v.__self__
return rv
def merge_sorted(*seqs, **kwargs):
""" Merge and sort a collection of sorted collections
This works lazily and only keeps one value from each iterable in memory.
>>> list(merge_sorted([1, 3, 5], [2, 4, 6]))
[1, 2, 3, 4, 5, 6]
>>> ''.join(merge_sorted('abc', 'abc', 'abc'))
'aaabbbccc'
The "key" function used to sort the input may be passed as a keyword.
>>> list(merge_sorted([2, 3], [1, 3], key=lambda x: x // 3))
[2, 1, 3, 3]
"""
key = kwargs.get('key', None)
if key is None:
# heapq.merge does what we do below except by val instead of key(val)
return heapq.merge(*seqs)
else:
return _merge_sorted_key(seqs, key)
def _merge_sorted_key(seqs, key):
# The commented code below shows an alternative (slower) implementation
# to apply a key function for sorting.
#
# mapper = lambda i, item: (key(item), i, item)
# keyiters = [map(partial(mapper, i), itr) for i, itr in
# enumerate(seqs)]
# return (item for (item_key, i, item) in heapq.merge(*keyiters))
# binary heap as a priority queue
pq = []
# Initial population
for itnum, it in enumerate(map(iter, seqs)):
try:
item = next(it)
pq.append([key(item), itnum, item, it])
except StopIteration:
pass
heapq.heapify(pq)
# Repeatedly yield and then repopulate from the same iterator
heapreplace = heapq.heapreplace
heappop = heapq.heappop
while len(pq) > 1:
try:
while True:
# raises IndexError when pq is empty
_, itnum, item, it = s = pq[0]
yield item
item = next(it) # raises StopIteration when exhausted
s[0] = key(item)
s[2] = item
heapreplace(pq, s) # restore heap condition
except StopIteration:
heappop(pq) # remove empty iterator
if pq:
# Much faster when only a single iterable remains
_, itnum, item, it = pq[0]
yield item
for item in it:
yield item
def interleave(seqs, pass_exceptions=()):
""" Interleave a sequence of sequences
>>> list(interleave([[1, 2], [3, 4]]))
[1, 3, 2, 4]
>>> ''.join(interleave(('ABC', 'XY')))
'AXBYC'
Both the individual sequences and the sequence of sequences may be infinite
Returns a lazy iterator
"""
iters = map(iter, seqs)
while iters:
newiters = []
for itr in iters:
try:
yield next(itr)
newiters.append(itr)
except (StopIteration,) + tuple(pass_exceptions):
pass
iters = newiters
def unique(seq, key=None):
""" Return only unique elements of a sequence
>>> tuple(unique((1, 2, 3)))
(1, 2, 3)
>>> tuple(unique((1, 2, 1, 3)))
(1, 2, 3)
Uniqueness can be defined by key keyword
>>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))
('cat', 'mouse')
"""
seen = set()
seen_add = seen.add
if key is None:
for item in seq:
if item not in seen:
seen_add(item)
yield item
else: # calculate key
for item in seq:
val = key(item)
if val not in seen:
seen_add(val)
yield item
def isiterable(x):
""" Is x iterable?
>>> isiterable([1, 2, 3])
True
>>> isiterable('abc')
True
>>> isiterable(5)
False
"""
try:
iter(x)
return True
except TypeError:
return False
def isdistinct(seq):
""" All values in sequence are distinct
>>> isdistinct([1, 2, 3])
True
>>> isdistinct([1, 2, 1])
False
>>> isdistinct("Hello")
False
>>> isdistinct("World")
True
"""
if iter(seq) is seq:
seen = set()
seen_add = seen.add
for item in seq:
if item in seen:
return False
seen_add(item)
return True
else:
return len(seq) == len(set(seq))
def take(n, seq):
""" The first n elements of a sequence
>>> list(take(2, [10, 20, 30, 40, 50]))
[10, 20]
See Also:
drop
tail
"""
return itertools.islice(seq, n)
def tail(n, seq):
""" The last n elements of a sequence
>>> tail(2, [10, 20, 30, 40, 50])
[40, 50]
See Also:
drop
take
"""
try:
return seq[-n:]
except (TypeError, KeyError):
return tuple(collections.deque(seq, n))
def drop(n, seq):
""" The sequence following the first n elements
>>> list(drop(2, [10, 20, 30, 40, 50]))
[30, 40, 50]
See Also:
take
tail
"""
return itertools.islice(seq, n, None)
def take_nth(n, seq):
""" Every nth item in seq
>>> list(take_nth(2, [10, 20, 30, 40, 50]))
[10, 30, 50]
"""
return itertools.islice(seq, 0, None, n)
def first(seq):
""" The first element in a sequence
>>> first('ABC')
'A'
"""
return next(iter(seq))
def second(seq):
""" The second element in a sequence
>>> second('ABC')
'B'
"""
return next(itertools.islice(seq, 1, None))
def nth(n, seq):
""" The nth element in a sequence
>>> nth(1, 'ABC')
'B'
"""
if isinstance(seq, (tuple, list, collections.Sequence)):
return seq[n]
else:
return next(itertools.islice(seq, n, None))
def last(seq):
""" The last element in a sequence
>>> last('ABC')
'C'
"""
return tail(1, seq)[0]
rest = partial(drop, 1)
no_default = '__no__default__'
def _get(ind, seq, default):
try:
return seq[ind]
except (KeyError, IndexError):
return default
def get(ind, seq, default=no_default):
""" Get element in a sequence or dict
Provides standard indexing
>>> get(1, 'ABC') # Same as 'ABC'[1]
'B'
Pass a list to get multiple values
>>> get([1, 2], 'ABC') # ('ABC'[1], 'ABC'[2])
('B', 'C')
Works on any value that supports indexing/getitem
For example here we see that it works with dictionaries
>>> phonebook = {'Alice': '555-1234',
... 'Bob': '555-5678',
... 'Charlie':'555-9999'}
>>> get('Alice', phonebook)
'555-1234'
>>> get(['Alice', 'Bob'], phonebook)
('555-1234', '555-5678')
Provide a default for missing values
>>> get(['Alice', 'Dennis'], phonebook, None)
('555-1234', None)
See Also:
pluck
"""
try:
return seq[ind]
except TypeError: # `ind` may be a list
if isinstance(ind, list):
if default is no_default:
if len(ind) > 1:
return operator.itemgetter(*ind)(seq)
elif ind:
return (seq[ind[0]],)
else:
return ()
else:
return tuple(_get(i, seq, default) for i in ind)
elif default is not no_default:
return default
else:
raise
except (KeyError, IndexError): # we know `ind` is not a list
if default is no_default:
raise
else:
return default
def concat(seqs):
""" Concatenate zero or more iterables, any of which may be infinite.
An infinite sequence will prevent the rest of the arguments from
being included.
We use chain.from_iterable rather than chain(*seqs) so that seqs
can be a generator.
>>> list(concat([[], [1], [2, 3]]))
[1, 2, 3]
See also:
itertools.chain.from_iterable equivalent
"""
return itertools.chain.from_iterable(seqs)
def concatv(*seqs):
""" Variadic version of concat
>>> list(concatv([], ["a"], ["b", "c"]))
['a', 'b', 'c']
See also:
itertools.chain
"""
return concat(seqs)
def mapcat(func, seqs):
""" Apply func to each sequence in seqs, concatenating results.
>>> list(mapcat(lambda s: [c.upper() for c in s],
... [["a", "b"], ["c", "d", "e"]]))
['A', 'B', 'C', 'D', 'E']
"""
return concat(map(func, seqs))
def cons(el, seq):
""" Add el to beginning of (possibly infinite) sequence seq.
>>> list(cons(1, [2, 3]))
[1, 2, 3]
"""
yield el
for s in seq:
yield s
def interpose(el, seq):
""" Introduce element between each pair of elements in seq
>>> list(interpose("a", [1, 2, 3]))
[1, 'a', 2, 'a', 3]
"""
combined = zip(itertools.repeat(el), seq)
return drop(1, concat(combined))
def frequencies(seq):
""" Find number of occurrences of each value in seq
>>> frequencies(['cat', 'cat', 'ox', 'pig', 'pig', 'cat']) #doctest: +SKIP
{'cat': 3, 'ox': 1, 'pig': 2}
See Also:
countby
groupby
"""
d = collections.defaultdict(int)
for item in seq:
d[item] += 1
return dict(d)
def reduceby(key, binop, seq, init=no_default):
""" Perform a simultaneous groupby and reduction
The computation:
>>> result = reduceby(key, binop, seq, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
>>> groups = groupby(key, seq) # doctest: +SKIP
>>> result = valmap(reduction, groups) # doctest: +SKIP
But the former does not build the intermediate groups, allowing it to
operate in much less space. This makes it suitable for larger datasets
    that do not fit comfortably in memory.
The ``init`` keyword argument is the default initialization of the
reduction. This can be either a constant value like ``0`` or a callable
like ``lambda : 0`` as might be used in ``defaultdict``.
Simple Examples
---------------
>>> from operator import add, mul
>>> iseven = lambda x: x % 2 == 0
>>> data = [1, 2, 3, 4, 5]
>>> reduceby(iseven, add, data) # doctest: +SKIP
{False: 9, True: 6}
>>> reduceby(iseven, mul, data) # doctest: +SKIP
{False: 15, True: 8}
Complex Example
---------------
>>> projects = [{'name': 'build roads', 'state': 'CA', 'cost': 1000000},
... {'name': 'fight crime', 'state': 'IL', 'cost': 100000},
... {'name': 'help farmers', 'state': 'IL', 'cost': 2000000},
... {'name': 'help farmers', 'state': 'CA', 'cost': 200000}]
>>> reduceby('state', # doctest: +SKIP
... lambda acc, x: acc + x['cost'],
... projects, 0)
{'CA': 1200000, 'IL': 2100000}
Example Using ``init``
----------------------
>>> def set_add(s, i):
... s.add(i)
... return s
>>> reduceby(iseven, set_add, [1, 2, 3, 4, 1, 2, 3], set) # doctest: +SKIP
{True: set([2, 4]),
False: set([1, 3])}
"""
if init is not no_default and not callable(init):
_init = init
init = lambda: _init
if not callable(key):
key = getter(key)
d = {}
for item in seq:
k = key(item)
if k not in d:
if init is no_default:
d[k] = item
continue
else:
d[k] = init()
d[k] = binop(d[k], item)
return d
def iterate(func, x):
""" Repeatedly apply a function func onto an original input
Yields x, then func(x), then func(func(x)), then func(func(func(x))), etc..
>>> def inc(x): return x + 1
>>> counter = iterate(inc, 0)
>>> next(counter)
0
>>> next(counter)
1
>>> next(counter)
2
>>> double = lambda x: x * 2
>>> powers_of_two = iterate(double, 1)
>>> next(powers_of_two)
1
>>> next(powers_of_two)
2
>>> next(powers_of_two)
4
>>> next(powers_of_two)
8
"""
while True:
yield x
x = func(x)
def sliding_window(n, seq):
""" A sequence of overlapping subsequences
>>> list(sliding_window(2, [1, 2, 3, 4]))
[(1, 2), (2, 3), (3, 4)]
This function creates a sliding window suitable for transformations like
sliding means / smoothing
>>> mean = lambda seq: float(sum(seq)) / len(seq)
>>> list(map(mean, sliding_window(2, [1, 2, 3, 4])))
[1.5, 2.5, 3.5]
"""
it = iter(seq)
# An efficient FIFO data structure with maximum length
d = collections.deque(itertools.islice(it, n), n)
    if len(d) != n:
        # Returning ends the generator; raising StopIteration here would become
        # a RuntimeError under PEP 479 (Python 3.7+).
        return
d_append = d.append
for item in it:
yield tuple(d)
d_append(item)
yield tuple(d)
no_pad = '__no__pad__'
def partition(n, seq, pad=no_pad):
""" Partition sequence into tuples of length n
>>> list(partition(2, [1, 2, 3, 4]))
[(1, 2), (3, 4)]
If the length of ``seq`` is not evenly divisible by ``n``, the final tuple
is dropped if ``pad`` is not specified, or filled to length ``n`` by pad:
>>> list(partition(2, [1, 2, 3, 4, 5]))
[(1, 2), (3, 4)]
>>> list(partition(2, [1, 2, 3, 4, 5], pad=None))
[(1, 2), (3, 4), (5, None)]
See Also:
partition_all
"""
args = [iter(seq)] * n
if pad is no_pad:
return zip(*args)
else:
return zip_longest(*args, fillvalue=pad)
def partition_all(n, seq):
""" Partition all elements of sequence into tuples of length at most n
The final tuple may be shorter to accommodate extra elements.
>>> list(partition_all(2, [1, 2, 3, 4]))
[(1, 2), (3, 4)]
>>> list(partition_all(2, [1, 2, 3, 4, 5]))
[(1, 2), (3, 4), (5,)]
See Also:
partition
"""
args = [iter(seq)] * n
it = zip_longest(*args, fillvalue=no_pad)
prev = next(it)
for item in it:
yield prev
prev = item
if prev[-1] is no_pad:
yield prev[:prev.index(no_pad)]
else:
yield prev
def count(seq):
""" Count the number of items in seq
    Like the builtin ``len`` but works on lazy sequences.
Not to be confused with ``itertools.count``
See also:
len
"""
if hasattr(seq, '__len__'):
return len(seq)
return sum(1 for i in seq)
def pluck(ind, seqs, default=no_default):
""" plucks an element or several elements from each item in a sequence.
``pluck`` maps ``itertoolz.get`` over a sequence and returns one or more
elements of each item in the sequence.
This is equivalent to running `map(curried.get(ind), seqs)`
``ind`` can be either a single string/index or a sequence of
strings/indices.
    ``seqs`` should be a sequence containing sequences or dicts.
e.g.
>>> data = [{'id': 1, 'name': 'Cheese'}, {'id': 2, 'name': 'Pies'}]
>>> list(pluck('name', data))
['Cheese', 'Pies']
>>> list(pluck([0, 1], [[1, 2, 3], [4, 5, 7]]))
[(1, 2), (4, 5)]
See Also:
get
map
"""
if default is no_default:
get = getter(ind)
return map(get, seqs)
elif isinstance(ind, list):
return (tuple(_get(item, seq, default) for item in ind)
for seq in seqs)
return (_get(ind, seq, default) for seq in seqs)
def getter(index):
if isinstance(index, list):
if len(index) == 1:
index = index[0]
return lambda x: (x[index],)
elif index:
return operator.itemgetter(*index)
else:
return lambda x: ()
else:
return operator.itemgetter(index)
def join(leftkey, leftseq, rightkey, rightseq,
left_default=no_default, right_default=no_default):
""" Join two sequences on common attributes
This is a semi-streaming operation. The LEFT sequence is fully evaluated
and placed into memory. The RIGHT sequence is evaluated lazily and so can
be arbitrarily large.
>>> friends = [('Alice', 'Edith'),
... ('Alice', 'Zhao'),
... ('Edith', 'Alice'),
... ('Zhao', 'Alice'),
... ('Zhao', 'Edith')]
>>> cities = [('Alice', 'NYC'),
... ('Alice', 'Chicago'),
    ...           ('Dan', 'Sydney'),
... ('Edith', 'Paris'),
... ('Edith', 'Berlin'),
... ('Zhao', 'Shanghai')]
>>> # Vacation opportunities
>>> # In what cities do people have friends?
>>> result = join(second, friends,
... first, cities)
>>> for ((a, b), (c, d)) in sorted(unique(result)):
... print((a, d))
('Alice', 'Berlin')
('Alice', 'Paris')
('Alice', 'Shanghai')
('Edith', 'Chicago')
('Edith', 'NYC')
('Zhao', 'Chicago')
('Zhao', 'NYC')
('Zhao', 'Berlin')
('Zhao', 'Paris')
Specify outer joins with keyword arguments ``left_default`` and/or
``right_default``. Here is a full outer join in which unmatched elements
are paired with None.
>>> identity = lambda x: x
>>> list(join(identity, [1, 2, 3],
... identity, [2, 3, 4],
... left_default=None, right_default=None))
[(2, 2), (3, 3), (None, 4), (1, None)]
Usually the key arguments are callables to be applied to the sequences. If
the keys are not obviously callable then it is assumed that indexing was
intended, e.g. the following is a legal change
>>> # result = join(second, friends, first, cities)
>>> result = join(1, friends, 0, cities) # doctest: +SKIP
"""
if not callable(leftkey):
leftkey = getter(leftkey)
if not callable(rightkey):
rightkey = getter(rightkey)
d = groupby(leftkey, leftseq)
seen_keys = set()
for item in rightseq:
key = rightkey(item)
seen_keys.add(key)
try:
left_matches = d[key]
for match in left_matches:
yield (match, item)
except KeyError:
if left_default is not no_default:
yield (left_default, item)
if right_default is not no_default:
for key, matches in d.items():
if key not in seen_keys:
for match in matches:
yield (match, right_default)
def diff(*seqs, **kwargs):
""" Return those items that differ between sequences
>>> list(diff([1, 2, 3], [1, 2, 10, 100]))
[(3, 10)]
Shorter sequences may be padded with a ``default`` value:
>>> list(diff([1, 2, 3], [1, 2, 10, 100], default=None))
[(3, 10), (None, 100)]
A ``key`` function may also be applied to each item to use during
comparisons:
>>> list(diff(['apples', 'bananas'], ['Apples', 'Oranges'], key=str.lower))
[('bananas', 'Oranges')]
"""
N = len(seqs)
if N == 1 and isinstance(seqs[0], list):
seqs = seqs[0]
N = len(seqs)
if N < 2:
raise TypeError('Too few sequences given (min 2 required)')
default = kwargs.get('default', no_default)
if default is no_default:
iters = zip(*seqs)
else:
iters = zip_longest(*seqs, fillvalue=default)
key = kwargs.get('key', None)
if key is None:
for items in iters:
if items.count(items[0]) != N:
yield items
else:
for items in iters:
vals = tuple(map(key, items))
if vals.count(vals[0]) != N:
yield items
def topk(k, seq, key=None):
"""
Find the k largest elements of a sequence
Operates lazily in ``n*log(k)`` time
>>> topk(2, [1, 100, 10, 1000])
(1000, 100)
Use a key function to change sorted order
>>> topk(2, ['Alice', 'Bob', 'Charlie', 'Dan'], key=len)
('Charlie', 'Alice')
See also:
heapq.nlargest
"""
if key and not callable(key):
key = getter(key)
return tuple(heapq.nlargest(k, seq, key=key))
|
|
"""
Noise processing
Jacob Dein 2016
nacoustik
Author: Jacob Dein
License: MIT
"""
import numpy as np
import pandas as pd
from numba import guvectorize, float64, int64
from scipy.ndimage import label, find_objects
from scipy.ndimage.morphology import generate_binary_structure
# implemented as a universal function via numba.guvectorize
@guvectorize([(float64[:,:,:], int64[:], int64[:],
float64[:,:,:], float64[:,:,:])],
'(c,f,t),(h),(e)->(c,f,h),(c,f,e)', nopython=True)
def _calculate_histograms(a, h_bins, e_bins, hists, edges):
for channel in range(a.shape[0]):
for f_band in range(a.shape[1]):
hists[channel, f_band], edges[channel, f_band] = \
np.histogram(a[channel, f_band], h_bins[0])
def _find_cutoff_index(histogram, cutoff_count):
cumsum = 0.0
    for index, value in histogram.items():
cumsum += value
if cumsum >= cutoff_count:
break
return index
# implemented as a universal function via numba.guvectorize
@guvectorize([(float64[:,:,:], float64[:,:,:])],
'(c,f,t)->(c,f,t)', nopython=True)
def _denoise(a, b):
for channel in range(2):
for f_band in range(4, a.shape[1] - 4):
for t_step in range(1, a.shape[2] - 1):
neighborhood = a[channel, \
f_band - 4:f_band + 5, \
t_step - 1:t_step + 2]
if neighborhood.mean() < 10:
b[channel, f_band, t_step] = neighborhood.min()
else:
b[channel, f_band, t_step] = neighborhood[4, 1]
# Convenience function to subtract two arrays of values given in decibels.
# Differences that are not positive are clamped so the result is 0 dB
# (rather than NaN or -inf from the logarithm).
def _subtract_decibels(a, b):
c = (10**(a / 10)) - (10**(b / 10))
c[c <= 0] = 1
return 10 * np.log10(c)
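# A worked example of the decibel arithmetic above (numbers are for
# illustration only): subtracting 57 dB of background from a 60 dB level
# leaves 10 * log10(10**6.0 - 10**5.7), roughly 57.0 dB, while subtracting a
# level greater than or equal to the signal is clamped to 0 dB:
#
#     _subtract_decibels(np.array([60.0]), np.array([57.0]))  # ~ array([57.0])
#     _subtract_decibels(np.array([50.0]), np.array([60.0]))  # -> array([0.0])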
def remove_background_noise(a, N=0.1, iterations=1):
"""
Removes background noise
Parameters
----------
a: numpy float64 array
a 3d array (channels, frequency bands, time steps)
representing the spectrogram of a wave signal
N: float, default = 0.1
decimal value (0 - 1) to determine signal cutoff levels
iterations: int, default = 1
number of iterations to run the denoise algorithm
"""
# determine number of histogram bins
    n_bins = np.round(a.shape[1] / 8).astype(int)
# allocate arrays for histograms and edge values
shape_histograms = (a.shape[0], a.shape[1], n_bins)
shape_edges = (a.shape[0], a.shape[1], n_bins + 1)
histograms = np.empty(shape_histograms)
edges = np.empty(shape_edges)
# call 'calculate_histograms' ufunc
histograms, edges = _calculate_histograms(a,
# number of bins in histograms, as an array (hack)
# necessary for numba.guvectorize function
np.ones(shape=(n_bins), dtype=np.int64) * n_bins,
# number of histogram edges, as an array (hack)
np.ones(shape=(n_bins + 1), dtype=np.int64) * (n_bins + 1),
histograms, edges)
# allocate array for modal and cutoff values
cutoffs = np.empty(shape=(histograms.shape[0], histograms.shape[1]))
modals = np.empty(shape=(histograms.shape[0], histograms.shape[1]))
# loop through all histograms for each frequency band
for channel in range(a.shape[0]):
# allocate minimum modal variable
modal_minimum = 0
for f_band in range(a.shape[1]):
# convert histogram to pandas.Series (to use moving average function)
histogram = pd.Series(histograms[channel, f_band])
# smooth
histogram = histogram.rolling(center=False, window=5).mean()
# replace NaN values generated by moving average
histogram.replace(np.NaN, 0, inplace=True)
# determine modal value
modal_index = histogram.idxmax()
modal = edges[channel, f_band, modal_index]
if modal > modal_minimum:
modals[channel, f_band] = modal_minimum
else:
modal_minimum = modal
modals[channel, f_band] = modal_minimum
# determine cutoff value
cutoff_count = histogram[0:modal_index].sum() * 0.68 * N
cutoffs[channel, f_band] = edges[channel, f_band, _find_cutoff_index(histogram, cutoff_count)]
#cutoffs_smooth = np.empty(shape=cutoffs.shape)
for channel in range(a.shape[0]):
smoothed = pd.Series(cutoffs[channel]) \
.rolling(center=False, window=5).mean()
# replace NaN values
smoothed.replace(np.NaN, smoothed.max(), inplace=True)
cutoffs[channel] = smoothed.values
ale = np.empty_like(a)
it = np.nditer([cutoffs], flags=['c_index', 'multi_index'],
op_flags=[['readonly']])
while not it.finished:
ale[it.multi_index[0], it.multi_index[1]] = \
a[it.multi_index[0], it.multi_index[1]] - it.value
it.iternext()
ale = np.select(condlist=[ale > 0], choicelist=[ale], default=0)
for i in range(iterations):
ale = _denoise(ale, np.zeros_like(ale))
# replace values through the ale mask (where ale = 0)
mask = np.ma.masked_not_equal(ale, value=0).mask
#a_mask = a * mask
#it = np.nditer([cutoffs], flags=['c_index', 'multi_index'],
# op_flags=[['readonly']])
#while not it.finished:
# a_mask[it.multi_index[0], it.multi_index[1]] = \
# _subtract_decibels(a_mask[it.multi_index[0], it.multi_index[1]],
# it.value)
# it.iternext()
#ale = a_mask * mask
#return ale
return a * mask
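# A minimal usage sketch of remove_background_noise with synthetic data (the
# shape and the dB scaling below are assumptions for illustration only):
#
#     psd = 10 * np.log10(np.random.rand(2, 512, 1000) + 1e-12)  # (channels, bands, steps)
#     cleaned = remove_background_noise(psd, N=0.1, iterations=1)
#
# The result has the same shape as the input; values judged to be background
# noise are zeroed out by the mask applied in the final step.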
def _label(ale):
s = generate_binary_structure(2, 2)
labels = np.empty_like(ale, dtype=np.int32)
n_features = np.empty(shape=(2), dtype=np.int32)
for channel in range(2):
labels[channel], n_features[channel] = label(ale[channel], structure=s)
return labels, n_features
def _find_low_rois(cutoff_frequency, rois, n_channels, freq_delta):
cutoffs = []
for channel in range(n_channels):
index = np.ceil(cutoff_frequency / freq_delta).astype(np.int32)
cutoffs.append(np.searchsorted(rois[channel][:, 0], index))
return cutoffs
def _find_high_rois(cutoff_frequency, rois, n_channels, freq_delta):
indices = []
for channel in range(n_channels):
index = np.ceil(cutoff_frequency / freq_delta).astype(np.int32)
indices.append(np.where(rois[channel][:, 1] >= index)[0])
return indices
def remove_anthrophony(ale, time_delta, freq_delta, cutoffs=(1000, 11000)):
"""
Removes anthrophony from a PSD spectrogram
Parameters
----------
ale: numpy float64 array
a 3d array (channels, frequency bands, time steps)
representing the spectrogram of a wave signal (with ale applied)
time_delta: float
amount of time between spectrogram intervals
freq_delta: float
width of each spectrogram frequency band
cutoffs: tuple of integers, (low_cutoff, high_cutoff), default = (1000, 11000)
        low and high cutoff frequencies that determine which noise events
        to remove (events that begin below the low cutoff or extend above
        the high cutoff are zeroed out)
"""
# label
s = generate_binary_structure(2, 2)
labels = np.empty_like(ale, dtype=np.int32)
n_features = np.empty(shape=(2), dtype=np.int32)
for channel in range(labels.shape[0]):
labels[channel], n_features[channel] = label(ale[channel], structure=s)
roi_windows = []
for channel in range(labels.shape[0]):
roi_windows_channel = find_objects(labels[channel])
roi_windows.append(roi_windows_channel)
rois = []
for channel in range(labels.shape[0]):
rois_channel = np.empty(shape=(n_features[channel], 4), dtype=np.int32)
index = 0
for window in roi_windows[channel]:
# frequency band
rois_channel[index, 0] = window[0].start
rois_channel[index, 1] = window[0].stop
# time window
rois_channel[index, 2] = window[1].start
rois_channel[index, 3] = window[1].stop
index += 1
rois.append(rois_channel)
#low_cutoffs
low_cutoffs = []
for channel in range(labels.shape[0]):
index = np.ceil(cutoffs[0] / freq_delta).astype(np.int32)
low_cutoffs.append(np.searchsorted(rois[channel][:, 0], index))
#high_indices
high_indices = []
for channel in range(labels.shape[0]):
index = np.ceil(cutoffs[1] / freq_delta).astype(np.int32)
high_indices.append(np.where(rois[channel][:, 1] >= index)[0])
for channel in range(labels.shape[0]):
for i in range(0, low_cutoffs[channel]):
a_roi = labels[channel, rois[channel][i, 0]:rois[channel][i, 1], rois[channel][i, 2]:rois[channel][i, 3]]
mask = np.ma.masked_not_equal(a_roi, value=(i + 1)).mask
a_roi = ale[channel, rois[channel][i, 0]:rois[channel][i, 1], rois[channel][i, 2]:rois[channel][i, 3]] * mask
ale[channel, rois[channel][i, 0]:rois[channel][i, 1], rois[channel][i, 2]:rois[channel][i, 3]] = a_roi
for channel in range(labels.shape[0]):
for i in high_indices[channel]:
a_roi = labels[channel, rois[channel][i, 0]:rois[channel][i, 1], rois[channel][i, 2]:rois[channel][i, 3]]
mask = np.ma.masked_not_equal(a_roi, value=(i + 1)).mask
a_roi = ale[channel, rois[channel][i, 0]:rois[channel][i, 1], rois[channel][i, 2]:rois[channel][i, 3]] * mask
ale[channel, rois[channel][i, 0]:rois[channel][i, 1], rois[channel][i, 2]:rois[channel][i, 3]] = a_roi
return ale
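# A usage sketch of remove_anthrophony, continuing the synthetic example above
# (the time_delta and freq_delta values are placeholders, not real STFT
# parameters):
#
#     biophony = remove_anthrophony(cleaned, time_delta=0.032, freq_delta=43.07,
#                                   cutoffs=(1000, 11000))
#
# Labelled noise events that begin below the low cutoff or extend above the
# high cutoff are zeroed out in the returned spectrogram.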
|
|
import binascii
import json
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
import pem
from twisted.internet.defer import Deferred
from twisted.logger import Logger
from txacme.interfaces import ICertificateStore
from zope.interface import implementer
from marathon_acme.clients.vault import CasError
def sort_pem_objects(pem_objects):
"""
Given a list of pem objects, sort the objects into the private key, leaf
certificate, and list of CA certificates in the trust chain. This function
assumes that the list of pem objects will contain exactly one private key
and exactly one leaf certificate and that only key and certificate type
objects are provided.
"""
keys, certs, ca_certs = [], [], []
for pem_object in pem_objects:
if isinstance(pem_object, pem.Key):
keys.append(pem_object)
else:
# This assumes all pem objects provided are either of type pem.Key
# or pem.Certificate. Technically, there are CSR and CRL types, but
# we should never be passed those.
if _is_ca(pem_object):
ca_certs.append(pem_object)
else:
certs.append(pem_object)
[key], [cert] = keys, certs
return key, cert, ca_certs
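# A small usage sketch (the path is hypothetical; the file is assumed to hold
# exactly one private key, one leaf certificate and any chain certificates):
#
#     pem_objects = pem.parse_file('/path/to/fullchain-with-key.pem')
#     key, cert, ca_certs = sort_pem_objects(pem_objects)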
def _is_ca(cert_pem_object):
cert = x509.load_pem_x509_certificate(
cert_pem_object.as_bytes(), default_backend())
basic_constraints = (
cert.extensions.get_extension_for_class(x509.BasicConstraints).value)
return basic_constraints.ca
def _cert_data_from_pem_objects(key, cert, ca_certs):
privkey = key.as_text()
cert = cert.as_text()
chain = ''.join([c.as_text() for c in ca_certs])
return {'privkey': privkey, 'cert': cert, 'chain': chain}
def _cert_data_to_pem_objects(cert_data):
"""
Given a non-None response from the Vault key/value store, convert the
key/values into a list of PEM objects.
"""
pem_objects = []
for key in ['privkey', 'cert', 'chain']:
pem_objects.extend(pem.parse(cert_data[key].encode('utf-8')))
return pem_objects
def _live_value(cert_pem_object, version):
# https://cryptography.io/en/stable/x509/reference/#cryptography.x509.load_pem_x509_certificate
cert = x509.load_pem_x509_certificate(
cert_pem_object.as_bytes(), default_backend())
# https://cryptography.io/en/stable/x509/reference/#cryptography.x509.Certificate.fingerprint
fingerprint = cert.fingerprint(hashes.SHA256())
fingerprint = binascii.hexlify(fingerprint).decode('utf-8')
# https://cryptography.io/en/stable/x509/reference/#cryptography.x509.Extensions.get_extension_for_class
# https://cryptography.io/en/stable/x509/reference/#cryptography.x509.SubjectAlternativeName.get_values_for_type
sans = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)
dns_names = sans.value.get_values_for_type(x509.DNSName)
return {
'version': version,
'fingerprint': fingerprint,
'dns_names': dns_names
}
@implementer(ICertificateStore)
class VaultKvCertificateStore(object):
"""
A ``txacme.interfaces.ICertificateStore`` implementation that stores
certificates in a Vault key/value version 2 secret engine.
"""
log = Logger()
def __init__(self, client, mount_path):
self._client = client
self._mount_path = mount_path
def get(self, server_name):
d = self._client.read_kv2(
'certificates/' + server_name, mount_path=self._mount_path)
def handle_not_found(response):
if response is None:
raise KeyError(server_name)
return response
def get_data(response):
return response['data']['data']
d.addCallback(handle_not_found)
d.addCallback(get_data)
d.addCallback(_cert_data_to_pem_objects)
return d
def store(self, server_name, pem_objects):
"""
The procedure for storing certificates is as follows:
1. The new certificate is stored without a CAS parameter. This assumes
that the certificate we are storing is always up-to-date.
1.1 From Vault's response, take the new certificate version:
``v_cert_new``.
2. The live map is read.
2.1 The version of the live map is kept: ``v_live``
2.2 Check if the certificate version in the live map is
``>= v_cert_new``.
2.2.1 If so, assume somebody else updated the live map. Finish.
2.2.2 If not, continue.
3. Update the live map and write it with ``cas=v_live``.
3.1 If the CAS fails, go back to step 2.
"""
# First store the certificate
key, cert, ca_certs = sort_pem_objects(pem_objects)
data = _cert_data_from_pem_objects(key, cert, ca_certs)
self.log.debug("Storing certificate '{server_name}'...",
server_name=server_name)
d = self._client.create_or_update_kv2(
'certificates/' + server_name, data, mount_path=self._mount_path)
def live_value(cert_response):
cert_version = cert_response['data']['version']
return _live_value(cert, cert_version)
d.addCallback(live_value)
# Then update the live mapping
return d.addCallback(self._update_live, server_name)
def _update_live(self, new_live_value, server_name):
d = self._read_live_data_and_version()
# When we fail to update the live mapping due to a Check-And-Set
# mismatch, try again from scratch
def retry_on_cas_error(failure):
failure.trap(CasError)
self.log.warn('Check-And-Set mismatch while updating live '
'mapping. Retrying...')
return self._update_live(new_live_value, server_name)
def update(live_data_and_version):
live, version = live_data_and_version
# Get the existing version of the cert in the live mapping
existing_live_value = live.get(server_name)
if existing_live_value is not None:
existing_cert_version = (
json.loads(existing_live_value)['version'])
else:
self.log.debug(
"Certificate '{server_name}' not previously stored",
server_name=server_name)
existing_cert_version = 0
# If the existing cert version is lower than what we want to update
# it to, then try update it
new_cert_version = new_live_value['version']
if existing_cert_version < new_cert_version:
self.log.debug(
"Updating live mapping for certificate '{server_name}' "
'from version {v1} to {v2}', server_name=server_name,
v1=existing_cert_version, v2=new_cert_version)
live[server_name] = json.dumps(new_live_value)
d = self._client.create_or_update_kv2(
'live', live, cas=version, mount_path=self._mount_path)
d.addErrback(retry_on_cas_error)
return d
else:
# Else assume somebody else updated the live mapping and stop
self.log.warn(
'Existing certificate version ({v1}) >= version we are '
'trying to store ({v2}). Not updating...',
v1=existing_cert_version, v2=new_cert_version)
return
d.addCallback(update)
return d
def _read_live_data_and_version(self):
d = self._client.read_kv2('live', mount_path=self._mount_path)
def get_data_and_version(response):
if response is not None:
data = response['data']['data']
version = response['data']['metadata']['version']
else:
data = {}
version = 0
self.log.debug('Read live mapping version {v} with {len_live} '
'entries.', v=version, len_live=len(data))
return data, version
return d.addCallback(get_data_and_version)
def as_dict(self):
d = self._read_live_data_and_version()
d.addCallback(self._read_all_certs)
return d
def _read_all_certs(self, live_data_and_version):
live, _ = live_data_and_version
certs = {}
def read_cert(_result, name):
self.log.debug("Reading certificate '{name}'...", name=name)
return self.get(name)
def collect_cert(pem_objects, name):
certs[name] = pem_objects
# Chain some deferreds to execute in series so we don't DoS Vault
d = Deferred()
for name, value in live.items():
d.addCallback(read_cert, name)
            # TODO: Try to update the live mapping on version mismatches
# TODO: Warn on certificate fingerprint, or dns_names mismatch
d.addCallback(collect_cert, name)
d.addCallback(lambda _result: certs)
# First deferred does nothing. Callback it to get the chain going.
d.callback(None)
return d
|
|
# coding: utf-8
# Distributed under the terms of the MIT License.
import pdb
import sys
import os.path
from ababe.stru.scaffold import SitesGrid, CStru, GeneralCell
from ababe.stru.element import GhostSpecie, Specie
from itertools import combinations, zip_longest
from progressbar import ProgressBar
import numpy as np
import spglib
from spglib import get_symmetry
import os
import xxhash
from itertools import tee
# Filename sogen is for Site-Occupy-GENerator
def numbers2id(numbers_arr):
num_hash = xxhash.xxh64(numbers_arr).intdigest()
return num_hash
class OccupyGenerator(object):
"""
    The class takes a GeneralCell instance as input and can produce
    generators of GeneralCell instances that are non-duplicate
    with respect to one another.
"""
def __init__(self, general_cell):
self.init_cell = general_cell
self.lattice = general_cell.lattice
self.positions = general_cell.positions
self.numbers = general_cell.numbers
self.symmetry_permutation = general_cell.get_symmetry_permutation()
self.wyckoffs = general_cell.get_wyckoffs()
def is_equivalent(self, cell_i, cell_other):
numbers_i = cell_i.numbers
numbers_other = cell_other.numbers
for perm in self.symmetry_permutation:
new_numbers = numbers_i[perm]
if np.array_equal(new_numbers, numbers_other):
return True
return False
def gen_dup_unitary(self, n, sp):
init_numbers = self.init_cell.numbers
num_count = init_numbers.size # number of atoms in structure
i_speckle = sp.Z
from itertools import combinations
for comb_index in combinations(range(num_count), n):
numbers = init_numbers.copy()
for index in comb_index:
numbers[index] = i_speckle
yield GeneralCell(self.lattice, self.positions, numbers)
def gen_nodup_unitary(self, n, sp):
dup = self.gen_dup_unitary(n, sp)
# sym_perm = self.symmetry_permutation
# isoset = dict()
# for cell in dup:
# cell_id = cell.id
# if cell_id not in isoset:
# yield cell
# # from ababe.stru.io import VaspPOSCAR
# # pdb.set_trace()
# self._update_isoset(isoset, cell.numbers, sym_perm)
return self.gen_2nodup_gen(dup)
def gen_dup(self, wy, n, sp):
init_numbers = self.init_cell.numbers
i_speckle = sp.Z
wyckoffs = self.wyckoffs
sp_ind = [i for i, w in enumerate(wyckoffs) if w is wy]
# pdb.set_trace()
for comb_index in combinations(sp_ind, n):
numbers = init_numbers.copy()
for index in comb_index:
numbers[index] = i_speckle
yield GeneralCell(self.lattice, self.positions, numbers)
def gen_nodup(self, wy, n, sp):
dup = self.gen_dup(wy, n, sp)
return self.gen_2nodup_gen(dup)
def gen_dup_of_ele(self, ele, n, sp):
# ele is a Specie instance
ele_num = ele.Z
init_numbers = self.init_cell.numbers
i_speckle = sp.Z
sp_ind = [i for i, e in enumerate(init_numbers) if e == ele_num]
for comb_index in combinations(sp_ind, n):
numbers = init_numbers.copy()
for index in comb_index:
numbers[index] = i_speckle
yield GeneralCell(self.lattice, self.positions, numbers)
def gen_nodup_of_ele(self, ele, n, sp):
dup = self.gen_dup_of_ele(ele, n, sp)
return self.gen_2nodup_gen(dup)
def gen_dup_exch(self, sp1, sp2, n):
init_numbers = self.init_cell.numbers
n_sp1 = sp1.Z
n_sp2 = sp2.Z
sp1_ind = [i for i, e in enumerate(init_numbers) if e == n_sp1]
sp2_ind = [i for i, e in enumerate(init_numbers) if e == n_sp2]
for ex_sp1 in combinations(sp1_ind, n):
for ex_sp2 in combinations(sp2_ind, n):
numbers = init_numbers.copy()
for ind_sp1, ind_sp2 in zip(ex_sp1, ex_sp2):
numbers[ind_sp1] = n_sp2
numbers[ind_sp2] = n_sp1
yield GeneralCell(self.lattice, self.positions, numbers)
def gen_nodup_exch(self, sp1, sp2, n):
dup = self.gen_dup_exch(sp1, sp2, n)
return self.gen_2nodup_gen(dup)
def _gen_dup_trinary_alloy(self, sp1, n1, sp2, n2):
init_numbers = self.init_cell.numbers
isp1 = sp1.Z
isp2 = sp2.Z
sp_ind_origin = [i for i, s in enumerate(init_numbers)]
for sp1_comb_index in combinations(sp_ind_origin, n1):
sp_ind_bin = [x for x in sp_ind_origin if x not in sp1_comb_index]
for sp2_comb_index in combinations(sp_ind_bin, n2):
numbers = init_numbers.copy()
for i1, i2 in zip_longest(sp1_comb_index, sp2_comb_index):
if i1 is not None:
numbers[i1] = isp1
if i2 is not None:
numbers[i2] = isp2
yield GeneralCell(self.lattice, self.positions, numbers)
# pdb.set_trace()
def gen_nodup_trinary_alloy(self, sp1, n1, sp2, n2):
dup = self._gen_dup_trinary_alloy(sp1, n1, sp2, n2)
return self.gen_2nodup_gen(dup)
def gen_add_one_speckle_unitary(self, gen, sp):
"""
        Input: a generator of structures (spg_cell instances, mostly non-duplicate).
        Output: a generator of structures with one more speckle.
        This method yields (possibly duplicate) structures which have one more
        speckle than the input structures.
"""
atom = sp.Z
id_db = dict()
for cell in gen:
for index, val in enumerate(cell.numbers):
numbers_new = cell.numbers.copy()
if atom != val:
numbers_new[index] = atom
num_id = numbers2id(numbers_new)
if num_id not in id_db:
yield GeneralCell(cell.lattice, cell.positions,
numbers_new)
id_db[num_id] = None
def gen_add_one_speckle(self, gen, wy, sp):
atom = sp.Z
id_db = dict()
for cell in gen:
wy_ele = zip(self.wyckoffs, cell.numbers)
for index, tuple_w_e in enumerate(wy_ele):
numbers_new = cell.numbers.copy()
if tuple_w_e[0] == wy and atom != tuple_w_e[1]:
numbers_new[index] = atom
num_id = numbers2id(numbers_new)
if num_id not in id_db:
yield GeneralCell(cell.lattice, cell.positions,
numbers_new)
id_db[num_id] = None
def gen_add_one_speckle_of_ele(self, gen, ele, sp):
ele_Z = ele.Z
atom = sp.Z
id_db = dict()
for cell in gen:
# site_ele = zip(self.wyckoffs, cell.numbers)
for index, e in enumerate(cell.numbers):
numbers_new = cell.numbers.copy()
if e == ele_Z and e != atom:
numbers_new[index] = atom
num_id = numbers2id(numbers_new)
if num_id not in id_db:
yield GeneralCell(cell.lattice, cell.positions,
numbers_new)
id_db[num_id] = None
def gen_2nodup_gen(self, dup_gen):
sym_perm = self.symmetry_permutation
isoset = dict()
bar = ProgressBar()
for cell in bar(dup_gen):
if cell.id not in isoset:
## TODO: combine degeneracy here
yield cell
self._update_isoset(isoset, cell.numbers, sym_perm)
@staticmethod
def _update_isoset(isoset, numbers, sym_perm):
for ind in sym_perm:
numbers_new = numbers[ind]
cell_id = numbers2id(numbers_new)
isoset[cell_id] = None
def all_speckle_gen_unitary(self, n, sp):
gen = (i for i in [self.init_cell])
n_init = self.init_cell.get_speckle_num(sp)
        print("Mission: Replace with {0:4}, up to {1:4d}...".format(sp.name, n))
for i in range(n_init, n):
gen = self.gen_add_one_speckle_unitary(gen, sp)
gen = self.gen_2nodup_gen(gen)
out_gen, gen = tee(gen, 2)
yield out_gen
def all_speckle_gen(self, n, wy, sp):
gen = (i for i in [self.init_cell])
n_init = self.init_cell.get_speckle_num(sp)
        msg = ("Mission: Replace with {0:4}, up to {1:4d}, "
               "in wyckoff site {2:3}...")
        print(msg.format(sp.name, n, wy))
for i in range(n_init, n):
gen = self.gen_add_one_speckle(gen, wy, sp)
gen = self.gen_2nodup_gen(gen)
out_gen, gen = tee(gen, 2)
yield out_gen
def all_speckle_gen_of_ele(self, n, ele, sp):
gen = (i for i in [self.init_cell])
n_init = self.init_cell.get_speckle_num(sp)
        msg = ("Mission: Replace with {0:4}, up to {1:4d}, "
               "in sites of element Z={2}...")
        print(msg.format(sp.name, n, ele.Z))
for i in range(n_init, n):
gen = self.gen_add_one_speckle_of_ele(gen, ele, sp)
gen = self.gen_2nodup_gen(gen)
out_gen, gen = tee(gen, 2)
yield out_gen
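# A minimal usage sketch of OccupyGenerator (the cubic lattice, positions and
# species below are assumptions chosen purely for illustration):
#
#     lattice = np.eye(3)
#     positions = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
#     numbers = np.array([29, 29])                      # two Cu sites
#     og = OccupyGenerator(GeneralCell(lattice, positions, numbers))
#     for new_cell in og.gen_nodup_unitary(1, Specie("Zn")):
#         print(new_cell.numbers)                       # symmetry-distinct Cu -> Zn substitutions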
def is_stru_equal(struA, struB, ops):
bA, posA, atom_numA = struA.get_cell()
bB, posB, atom_numB = struB.get_cell()
id_struA = _get_id_seq(posA, atom_numA)
is_equal = False
for r, t in ops:
pos_new = np.transpose(np.matmul(r, np.transpose(posB))) + t
id_stru = _get_id_seq(pos_new, atom_numB)
if id_stru == id_struA:
is_equal = True
return is_equal
def _get_id_seq(pos, arr_num):
# from fractions import Fraction
# transfer the atom position into >=0 and <=1
pos = np.around(pos, decimals=10)
func_tofrac = np.vectorize(lambda x: round((x % 1), 3))
o_pos = func_tofrac(pos)
# round_o_pos = np.around(o_pos, decimals=3)
# z, y, x = round_o_pos[:, 2], round_o_pos[:, 1], round_o_pos[:, 0]
z, y, x = o_pos[:, 2], o_pos[:, 1], o_pos[:, 0]
ind_sort = np.lexsort((z, y, x))
id_seq = str(arr_num[ind_sort])
return id_seq
def _get_atom_seq_identifier(numbers):
return str(list(numbers))
def _update_isoset(isoset, numbers, sym_perm):
for ind in sym_perm:
# pos_new = np.transpose(np.matmul(r, np.transpose(pos))) + t
sequence_new = numbers[ind]
id_stru = _get_atom_seq_identifier(sequence_new)
# isoset_cstru.add(id_stru)
# isoset.update(isoset_cstru)
isoset.add(id_stru)
return isoset
def get_new_id_seq(pos, numbers):
"""
    A helper function to produce the new sequence of the transformed
    structure. The algorithm sorts the transformed positions back into the
    initial order and uses the resulting index to reorder the atomic numbers.
"""
# transfer the atom position into >=0 and <=1
pos = np.around(pos, decimals=5)
func_tofrac = np.vectorize(lambda x: round((x % 1), 3))
o_pos = func_tofrac(pos)
# round_o_pos = np.around(o_pos, decimals=3)
# z, y, x = round_o_pos[:, 2], round_o_pos[:, 1], round_o_pos[:, 0]
z, y, x = o_pos[:, 2], o_pos[:, 1], o_pos[:, 0]
inds = np.lexsort((z, y, x))
return inds
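# For example (values assumed for illustration): if a symmetry operation maps
# fractional positions [[0, 0, 0], [0.5, 0.5, 0.5]] to
# [[0.5, 0.5, 0.5], [0, 0, 0]], lexsorting the transformed positions yields the
# index [1, 0], which get_permutation_cell() then uses to permute the atomic
# numbers of the transformed structure.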
def get_permutation_cell(cell):
lat, pos, num = cell
atom_num = len(cell[2])
numbers = [i for i in range(atom_num)]
sym = get_symmetry(cell, symprec=1e-3)
ops = [(r, t) for r, t in zip(sym['rotations'], sym['translations'])]
sym_perm = []
for r,t in ops:
pos_new = np.transpose(np.matmul(r, np.transpose(pos))) + t
perm = get_new_id_seq(pos_new, numbers)
sym_perm.append(perm)
return sym_perm
def gen_nodup_cstru(lattice, sea_ele, size, speckle, num):
d, w, l = size
ele_sea = SitesGrid.sea(d, w, l, sea_ele)
cell_mother_stru = CStru(lattice, ele_sea).get_cell()
sym_perm = get_permutation_cell(cell_mother_stru)
# For testing: Show that the first unit matrix convert to range(num) perm_operator
# print(sym_perm[0])
gen_dup_cstrus = CStru.gen_speckle(lattice, sea_ele, size, speckle, num)
# Add the progress bar when looping
from scipy.special import comb
number_of_structures = comb((d*w*l), num)
bar = ProgressBar(max_value=number_of_structures)
isoset = set()
for cstru in bar(gen_dup_cstrus):
b, pos, atom_num = cstru.get_cell()
#id_cstru = _get_id_seq(pos, atom_num)
id_cstru = _get_atom_seq_identifier(atom_num)
# print(id_cstru)
if id_cstru not in isoset:
# print(len(sym_perm))
# print(len(isoset))
# print(cstru.get_array())
yield cstru
_update_isoset(isoset, atom_num, sym_perm)
def default(text):
    return text + ' [Default: %(default)s]'
def lat_dict(lattice):
from math import sqrt
lat = { 'bcc': [[-0.5, -0.5, -0.5],
[-0.5, 0.5, 0.5],
[ 0.5, -0.5, 0.5]],
'fcc': [[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]],
'scc': [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
'triflat': [[0, 0, 20],
[1, 0, 0],
[0.5, sqrt(3)/2, 0]]
}
return lat[lattice]
# This predicate is used to filter out structures that conflict with the
# defined restriction condition.
# Input: a CStru instance and the speckle Specie.
# Output: True when every pair of speckle sites is separated by more than one
# grid spacing (i.e. the speckles are disjunct), otherwise False.
def is_speckle_disjunct(cstru, speckle):
m = cstru.m
sites_arr = cstru.get_array()
ele = speckle.Z
pool_sites_arr = _pool_sites(sites_arr)
ele_index = np.argwhere(pool_sites_arr==ele)
points = np.array([_index2coor(ind, m) for ind in ele_index])
min_d = _min_dist(points)
is_disjunct = min_d > 1.01
return is_disjunct
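# A sketch of how the predicate above can be used to filter a structure
# generator (``gen_dup_cstrus`` and ``speckle`` stand for whatever generator
# and Specie instance are in use):
#
#     disjunct_strus = (s for s in gen_dup_cstrus if is_speckle_disjunct(s, speckle))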
def _min_dist(points):
# get the closest pair of points
# Brute-force algorithm
min_distance = 9999
pairs = combinations(points, 2)
for pair in pairs:
if _dist(pair[0], pair[1]) < min_distance:
min_distance = _dist(pair[0], pair[1])
return min_distance
def _dist(p,q):
dist = np.linalg.norm(p-q)
return dist
def _pool_sites(sites_arr):
d, w, l = np.shape(sites_arr)
pool = sites_arr
# pool the elements of outer dimension (depth)
depth_d = pool[0, :, :].reshape(1,w,l)
pool = np.concatenate((pool, depth_d), axis=0)
# pool the elements of meddle dimension (width)
width_d = pool[:, 0, :].reshape(d+1, 1, l)
pool = np.concatenate((pool, width_d), axis=1)
# pool the elements of inner dimension (length)
length_d = pool[:, :, 0].reshape(d+1, w+1, 1)
pool = np.concatenate((pool, length_d), axis=2)
return pool
def _index2coor(ind, m):
m_arr = np.array(m)
d, w, l = ind
v1 = m_arr[0]*d
v2 = m_arr[1]*w
v3 = m_arr[2]*l
cood = np.array([v1[0]+v2[0]+v3[0], v1[1]+v2[1]+v3[1], v1[2]+v2[2]+v3[2]])
return cood
# def main():
# parser = argparse.ArgumentParser()
# parser.add_argument('--lattice', choices=['bcc', 'fcc', 'scc', 'triflat'],
# help='Lattice type of grid conventional cell')
# parser.add_argument('-b', '--base', dest='sea', required=True,
# help='Element abbreviation of the base specie')
# parser.add_argument('-g', '--size', nargs=3, dest='size', required=True,
# help='Grid size of structure', type=int)
# parser.add_argument('-s', '--speckle', dest='speckle', required=True,
# help='Element abbreviation of the speckle specie')
# parser.add_argument('-n', '--num', dest='number', type=int,
# help=default('Number of speckles filled in the base'), default=2)
# parser.add_argument('-o', '--output-type',
# help=default('Output type of generated non-duplicate periodic grid structure'),
# default='normal')
# args = parser.parse_args()
# size = args.size
# sea_ele = Specie(args.sea)
# speckle = Specie(args.speckle)
# nodup_gen = gen_nodup_cstru(lat_dict(args.lattice), sea_ele, size, speckle, args.number)
# with open('allstru.txt', 'w') as f:
# for s in nodup_gen:
# basis, pos, atom = s.get_cell()
# f.write('ONE NEW STRUCTURE:\n')
# f.write('The basis is:\n')
# f.write('\n'.join(str(line) for line in basis))
# f.write('\nThe position is:\n')
# f.write('\n'.join(str(line) for line in pos))
# f.write('\nThe elements is:\n')
# f.write(str(atom))
# f.write('\n\n\n')
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import unittest
from collections import OrderedDict
from unittest.mock import patch
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.apache.hive.operators.hive_stats import HiveStatsCollectionOperator
from tests.providers.apache.hive import DEFAULT_DATE, DEFAULT_DATE_DS, TestHiveEnvironment
from tests.test_utils.mock_hooks import MockHiveMetastoreHook, MockMySqlHook, MockPrestoHook
class _FakeCol:
def __init__(self, col_name, col_type):
self.name = col_name
self.type = col_type
fake_col = _FakeCol('col', 'string')
class TestHiveStatsCollectionOperator(TestHiveEnvironment):
def setUp(self):
self.kwargs = dict(
table='table',
partition=dict(col='col', value='value'),
metastore_conn_id='metastore_conn_id',
presto_conn_id='presto_conn_id',
mysql_conn_id='mysql_conn_id',
task_id='test_hive_stats_collection_operator',
)
super().setUp()
def test_get_default_exprs(self):
col = 'col'
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, None)
assert default_exprs == {(col, 'non_null'): f'COUNT({col})'}
def test_get_default_exprs_excluded_cols(self):
col = 'excluded_col'
self.kwargs.update(dict(excluded_columns=[col]))
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, None)
assert default_exprs == {}
def test_get_default_exprs_number(self):
col = 'col'
for col_type in ['double', 'int', 'bigint', 'float']:
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, 'avg'): f'AVG({col})',
(col, 'max'): f'MAX({col})',
(col, 'min'): f'MIN({col})',
(col, 'non_null'): f'COUNT({col})',
(col, 'sum'): f'SUM({col})',
}
def test_get_default_exprs_boolean(self):
col = 'col'
col_type = 'boolean'
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, 'false'): f'SUM(CASE WHEN NOT {col} THEN 1 ELSE 0 END)',
(col, 'non_null'): f'COUNT({col})',
(col, 'true'): f'SUM(CASE WHEN {col} THEN 1 ELSE 0 END)',
}
def test_get_default_exprs_string(self):
col = 'col'
col_type = 'string'
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, 'approx_distinct'): f'APPROX_DISTINCT({col})',
(col, 'len'): f'SUM(CAST(LENGTH({col}) AS BIGINT))',
(col, 'non_null'): f'COUNT({col})',
}
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute(self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
mock_hive_metastore_hook.assert_called_once_with(
metastore_conn_id=hive_stats_collection_operator.metastore_conn_id
)
mock_hive_metastore_hook.return_value.get_table.assert_called_once_with(
table_name=hive_stats_collection_operator.table
)
mock_presto_hook.assert_called_once_with(presto_conn_id=hive_stats_collection_operator.presto_conn_id)
mock_mysql_hook.assert_called_once_with(hive_stats_collection_operator.mysql_conn_id)
mock_json_dumps.assert_called_once_with(hive_stats_collection_operator.partition, sort_keys=True)
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {('', 'count'): 'COUNT(*)'}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.get_default_exprs(col, col_type))
exprs = OrderedDict(exprs)
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_with_assignment_func(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
def assignment_func(col, _):
return {(col, 'test'): f'TEST({col})'}
self.kwargs.update(dict(assignment_func=assignment_func))
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {('', 'count'): 'COUNT(*)'}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.assignment_func(col, col_type))
exprs = OrderedDict(exprs)
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_with_assignment_func_no_return_value(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
def assignment_func(_, __):
pass
self.kwargs.update(dict(assignment_func=assignment_func))
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {('', 'count'): 'COUNT(*)'}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.get_default_exprs(col, col_type))
exprs = OrderedDict(exprs)
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_no_query_results(self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
mock_presto_hook.return_value.get_first.return_value = None
with pytest.raises(AirflowException):
HiveStatsCollectionOperator(**self.kwargs).execute(context={})
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_delete_previous_runs_rows(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = True
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
sql = f"""
DELETE FROM hive_stats
WHERE
table_name='{hive_stats_collection_operator.table}' AND
partition_repr='{mock_json_dumps.return_value}' AND
dttm='{hive_stats_collection_operator.dttm}';
"""
mock_mysql_hook.return_value.run.assert_called_once_with(sql)
@unittest.skipIf(
'AIRFLOW_RUNALL_TESTS' not in os.environ, "Skipped because AIRFLOW_RUNALL_TESTS is not set"
)
@patch(
'airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook',
side_effect=MockHiveMetastoreHook,
)
def test_runs_for_hive_stats(self, mock_hive_metastore_hook):
mock_mysql_hook = MockMySqlHook()
mock_presto_hook = MockPrestoHook()
with patch(
'airflow.providers.apache.hive.operators.hive_stats.PrestoHook', return_value=mock_presto_hook
):
with patch(
'airflow.providers.apache.hive.operators.hive_stats.MySqlHook', return_value=mock_mysql_hook
):
op = HiveStatsCollectionOperator(
task_id='hive_stats_check',
table="airflow.static_babynames_partitioned",
partition={'ds': DEFAULT_DATE_DS},
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
select_count_query = (
"SELECT COUNT(*) AS __count FROM airflow."
+ "static_babynames_partitioned WHERE ds = '2015-01-01';"
)
mock_presto_hook.get_first.assert_called_with(hql=select_count_query)
expected_stats_select_query = (
"SELECT 1 FROM hive_stats WHERE table_name='airflow."
+ "static_babynames_partitioned' AND "
+ "partition_repr='{\"ds\": \"2015-01-01\"}' AND "
+ "dttm='2015-01-01T00:00:00+00:00' "
+ "LIMIT 1;"
)
raw_stats_select_query = mock_mysql_hook.get_records.call_args_list[0][0][0]
actual_stats_select_query = re.sub(r'\s{2,}', ' ', raw_stats_select_query).strip()
assert expected_stats_select_query == actual_stats_select_query
insert_rows_val = [
(
'2015-01-01',
'2015-01-01T00:00:00+00:00',
'airflow.static_babynames_partitioned',
'{"ds": "2015-01-01"}',
'',
'count',
['val_0', 'val_1'],
)
]
mock_mysql_hook.insert_rows.assert_called_with(
table='hive_stats',
rows=insert_rows_val,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
|
|
"""
Unit tests for the frontend code.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import logging
import ga4gh.datamodel as datamodel
import ga4gh.frontend as frontend
import ga4gh.protocol as protocol
class TestFrontend(unittest.TestCase):
"""
Tests the basic routing and HTTP handling for the Flask app.
"""
exampleUrl = 'www.example.com'
@classmethod
def setUpClass(cls):
config = {
"DATA_SOURCE": "__SIMULATED__",
"SIMULATED_BACKEND_RANDOM_SEED": 1111,
"SIMULATED_BACKEND_NUM_CALLS": 1,
"SIMULATED_BACKEND_VARIANT_DENSITY": 1.0,
"SIMULATED_BACKEND_NUM_VARIANT_SETS": 1,
# "DEBUG" : True
}
frontend.reset()
frontend.configure(
baseConfig="TestConfig", extraConfig=config)
cls.app = frontend.app.test_client()
# silence usually unhelpful CORS log
logging.getLogger('ga4gh.frontend.cors').setLevel(logging.CRITICAL)
# example test values
cls.backend = frontend.app.backend
cls.referenceSet = cls.backend.getReferenceSets()[0]
cls.referenceSetId = cls.referenceSet.getId()
cls.reference = cls.referenceSet.getReferences()[0]
cls.referenceId = cls.reference.getId()
cls.dataset = cls.backend.getDatasets()[0]
cls.datasetId = cls.dataset.getId()
cls.variantSet = cls.dataset.getVariantSets()[0]
cls.variantSetId = cls.variantSet.getId()
gaVariant = cls.variantSet.getVariants("1", 0, 2**32).next()
cls.variantId = gaVariant.id
cls.callSet = cls.variantSet.getCallSets()[0]
cls.callSetId = cls.callSet.getId()
cls.readGroupSet = cls.dataset.getReadGroupSets()[0]
cls.readGroupSetId = cls.readGroupSet.getId()
cls.readGroup = cls.readGroupSet.getReadGroups()[0]
cls.readGroupId = cls.readGroup.getId()
cls.readAlignment = cls.readGroup.getReadAlignments().next()
cls.readAlignmentId = cls.readAlignment.id
def sendPostRequest(self, path, request):
"""
Sends the specified GA request object and returns the response.
"""
headers = {
'Content-type': 'application/json',
'Origin': self.exampleUrl,
}
return self.app.post(
path, headers=headers, data=request.toJsonString())
def sendGetRequest(self, path):
"""
Sends a get request to the specified URL and returns the response.
"""
headers = {
'Origin': self.exampleUrl,
}
return self.app.get(path, headers=headers)
def sendVariantsSearch(self):
response = self.sendVariantSetsSearch()
variantSets = protocol.SearchVariantSetsResponse().fromJsonString(
response.data).variantSets
request = protocol.SearchVariantsRequest()
request.variantSetId = variantSets[0].id
request.referenceName = "1"
request.start = 0
request.end = 1
return self.sendPostRequest('/variants/search', request)
def sendVariantSetsSearch(self):
request = protocol.SearchVariantSetsRequest()
request.datasetId = self.datasetId
return self.sendPostRequest('/variantsets/search', request)
def sendCallSetsSearch(self):
response = self.sendVariantSetsSearch()
variantSets = protocol.SearchVariantSetsResponse().fromJsonString(
response.data).variantSets
request = protocol.SearchCallSetsRequest()
request.variantSetId = variantSets[0].id
return self.sendPostRequest('/callsets/search', request)
def sendReadsSearch(self, readGroupIds=None):
if readGroupIds is None:
readGroupIds = [self.readGroupId]
request = protocol.SearchReadsRequest()
request.readGroupIds = readGroupIds
request.referenceId = self.referenceId
return self.sendPostRequest('/reads/search', request)
def sendDatasetsSearch(self):
request = protocol.SearchDatasetsRequest()
return self.sendPostRequest('/datasets/search', request)
def sendReferencesSearch(self):
path = "/references/search"
request = protocol.SearchReferencesRequest()
response = self.sendPostRequest(path, request)
return response
def sendGetVariant(self, id_=None):
if id_ is None:
id_ = self.variantId
path = "/variants/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendGetVariantSet(self, id_=None):
if id_ is None:
id_ = self.variantSetId
path = "/variantsets/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendGetDataset(self, id_=None):
if id_ is None:
id_ = self.datasetId
path = "/datasets/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendGetReadGroup(self, id_=None):
if id_ is None:
id_ = self.readGroupId
path = "/readgroups/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendGetReference(self, id_=None):
if id_ is None:
id_ = self.referenceId
path = "/references/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendGetReadGroupSet(self, id_=None):
if id_ is None:
id_ = self.readGroupSetId
path = "/readgroupsets/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendGetCallset(self, id_=None):
if id_ is None:
id_ = self.callSetId
path = "/callsets/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendGetReferenceSet(self, id_=None):
if id_ is None:
id_ = self.referenceSetId
path = "/referencesets/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendListRequest(self, path, request):
headers = {
'Origin': self.exampleUrl,
}
data = request.toJsonDict()
response = self.app.get(path, data=data, headers=headers)
return response
def sendReferenceBasesList(self, id_=None):
if id_ is None:
id_ = self.referenceId
path = "/references/{}/bases".format(id_)
request = protocol.ListReferenceBasesRequest()
response = self.sendListRequest(path, request)
return response
def test404sReturnJson(self):
paths = [
'/doesNotExist',
'/reads/sea',
'/variantsets/id/doesNotExist',
]
for path in paths:
response = self.app.get(path)
protocol.GAException.fromJsonString(response.get_data())
self.assertEqual(404, response.status_code)
def testCors(self):
def assertHeaders(response):
self.assertEqual(self.exampleUrl,
response.headers['Access-Control-Allow-Origin'])
self.assertTrue('Content-Type' in response.headers)
# Post-based search methods
assertHeaders(self.sendVariantsSearch())
assertHeaders(self.sendVariantSetsSearch())
assertHeaders(self.sendReadsSearch())
assertHeaders(self.sendReferencesSearch())
assertHeaders(self.sendReferenceBasesList())
assertHeaders(self.sendDatasetsSearch())
# Get-based accessor methods
assertHeaders(self.sendGetVariantSet())
assertHeaders(self.sendGetReference())
assertHeaders(self.sendGetReferenceSet())
assertHeaders(self.sendGetReadGroupSet())
assertHeaders(self.sendGetReadGroup())
assertHeaders(self.sendGetVariant())
assertHeaders(self.sendGetDataset())
# TODO: Test other methods as they are implemented
def verifySearchRouting(self, path, getDefined=False):
"""
Verifies that the specified path has the correct routing for a
search command. If getDefined is False we also check that a GET
request to the path returns a 405 (method not allowed) status code.
"""
response = self.app.post(path)
protocol.GAException.fromJsonString(response.get_data())
self.assertEqual(415, response.status_code)
if not getDefined:
getResponse = self.app.get(path)
protocol.GAException.fromJsonString(getResponse.get_data())
self.assertEqual(405, getResponse.status_code)
# Malformed requests should return 400
for badJson in ["", None, "JSON", "<xml/>", "{]"]:
badResponse = self.app.post(
path, data=badJson,
headers={'Content-type': 'application/json'})
self.assertEqual(400, badResponse.status_code)
# OPTIONS should return success
self.assertEqual(200, self.app.options(path).status_code)
def testRouteReferences(self):
referenceId = self.referenceId
paths = ['/references/{}', '/references/{}/bases']
for path in paths:
path = path.format(referenceId)
self.assertEqual(200, self.app.get(path).status_code)
referenceSetId = self.referenceSetId
paths = ['/referencesets/{}']
for path in paths:
path = path.format(referenceSetId)
self.assertEqual(200, self.app.get(path).status_code)
self.verifySearchRouting('/referencesets/search', True)
self.verifySearchRouting('/references/search', True)
def testRouteCallsets(self):
path = '/callsets/search'
self.assertEqual(415, self.app.post(path).status_code)
self.assertEqual(200, self.app.options(path).status_code)
self.assertEqual(405, self.app.get(path).status_code)
def testRouteReads(self):
paths = ['/reads/search', '/readgroupsets/search']
for path in paths:
self.verifySearchRouting(path)
def testRouteVariants(self):
self.verifySearchRouting('/variantsets/search', True)
self.verifySearchRouting('/variants/search', False)
def testRouteIndex(self):
path = "/"
response = self.app.get(path)
self.assertEqual(200, response.status_code)
self.assertEqual("text/html", response.mimetype)
self.assertGreater(len(response.data), 0)
def testVariantsSearch(self):
response = self.sendVariantsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.SearchVariantsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.variants), 1)
def testVariantSetsSearch(self):
response = self.sendVariantSetsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.SearchVariantSetsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.variantSets), 1)
def testGetDataset(self):
# Test OK: ID found
response = self.sendDatasetsSearch()
responseData = protocol.SearchDatasetsResponse.fromJsonString(
response.data)
datasetId = responseData.datasets[0].id
response = self.sendGetDataset(datasetId)
self.assertEqual(200, response.status_code)
# Test Error: 404, ID not found
obfuscated = datamodel.CompoundId.obfuscate("notValid")
compoundId = datamodel.DatasetCompoundId.parse(obfuscated)
response = self.sendGetDataset(str(compoundId))
self.assertEqual(404, response.status_code)
def testGetVariantSet(self):
response = self.sendVariantSetsSearch()
responseData = protocol.SearchVariantSetsResponse.fromJsonString(
response.data)
variantSetId = responseData.variantSets[0].id
response = self.sendGetVariantSet(variantSetId)
self.assertEqual(200, response.status_code)
obfuscated = datamodel.CompoundId.obfuscate("notValid:notValid")
compoundId = datamodel.VariantSetCompoundId.parse(obfuscated)
response = self.sendGetVariantSet(str(compoundId))
self.assertEqual(404, response.status_code)
def testGetReadGroupSet(self):
response = self.sendGetReadGroupSet()
self.assertEqual(200, response.status_code)
responseData = protocol.ReadGroupSet.fromJsonString(
response.data)
self.assertEqual(
responseData.id, self.readGroupSetId)
def testGetReadGroup(self):
response = self.sendGetReadGroup()
self.assertEqual(200, response.status_code)
responseData = protocol.ReadGroup.fromJsonString(
response.data)
self.assertEqual(
responseData.id, self.readGroupId)
def testGetCallset(self):
response = self.sendGetCallset()
self.assertEqual(200, response.status_code)
responseData = protocol.CallSet.fromJsonString(
response.data)
self.assertEqual(
responseData.id, self.callSetId)
def testGetVariant(self):
response = self.sendGetVariant()
self.assertEqual(200, response.status_code)
def testCallSetsSearch(self):
response = self.sendCallSetsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.SearchCallSetsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.callSets), 1)
def testReadsSearch(self):
response = self.sendReadsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.SearchReadsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.alignments), 2)
self.assertEqual(
responseData.alignments[0].id,
self.readAlignmentId)
def testDatasetsSearch(self):
response = self.sendDatasetsSearch()
responseData = protocol.SearchDatasetsResponse.fromJsonString(
response.data)
datasets = list(responseData.datasets)
self.assertEqual(self.datasetId, datasets[0].id)
def testNoAuthentication(self):
path = '/oauth2callback'
self.assertEqual(501, self.app.get(path).status_code)
|
|
#=======================================================================
# queues.py
#=======================================================================
'''A collection of queue model implementations.'''
from pymtl import *
from pclib.ifcs import InValRdyBundle, OutValRdyBundle
from pclib.rtl import RegEn, Mux, RegisterFile
#-----------------------------------------------------------------------
# SingleElementNormalQueue
#-----------------------------------------------------------------------
class SingleElementNormalQueue( Model ):
def __init__( s, dtype ):
s.enq = InValRdyBundle ( dtype )
s.deq = OutValRdyBundle( dtype )
# Ctrl and Dpath unit instantiation
s.ctrl = SingleElementNormalQueueCtrl ()
s.dpath = SingleElementNormalQueueDpath( dtype )
# Ctrl unit connections
s.connect( s.ctrl.enq_val, s.enq.val )
s.connect( s.ctrl.enq_rdy, s.enq.rdy )
s.connect( s.ctrl.deq_val, s.deq.val )
s.connect( s.ctrl.deq_rdy, s.deq.rdy )
# Dpath unit connections
s.connect( s.dpath.enq_bits, s.enq.msg )
s.connect( s.dpath.deq_bits, s.deq.msg )
# Control Signal connections (ctrl -> dpath)
s.connect( s.dpath.wen, s.ctrl.wen )
def line_trace( s ):
return "{} () {}".format( s.enq, s.deq )
#-----------------------------------------------------------------------
# SingleElementNormalQueueDpath
#-----------------------------------------------------------------------
class SingleElementNormalQueueDpath( Model ):
def __init__( s, dtype ):
s.enq_bits = InPort ( dtype )
s.deq_bits = OutPort ( dtype )
# Control signal (ctrl -> dpath)
s.wen = InPort ( 1 )
# Queue storage
s.queue = RegEn( dtype )
# Connect queue storage
s.connect( s.queue.en, s.wen )
s.connect( s.queue.in_, s.enq_bits )
s.connect( s.queue.out, s.deq_bits )
#-----------------------------------------------------------------------
# SingleElementNormalQueueCtrl
#-----------------------------------------------------------------------
class SingleElementNormalQueueCtrl( Model ):
def __init__( s ):
# Interface Ports
s.enq_val = InPort ( 1 )
s.enq_rdy = OutPort ( 1 )
s.deq_val = OutPort ( 1 )
s.deq_rdy = InPort ( 1 )
# Control signal (ctrl -> dpath)
s.wen = OutPort ( 1 )
s.full = Wire ( 1 )
@s.combinational
def comb():
# wen control signal: set the write enable signal if the storage queue
# is empty and a valid enqueue request is present
s.wen.value = ~s.full & s.enq_val
# enq_rdy signal is asserted when the single element queue storage is
# empty
s.enq_rdy.value = ~s.full
# deq_val signal is asserted when the single element queue storage is
# full
s.deq_val.value = s.full
@s.posedge_clk
def seq():
# full bit calculation: the full bit is cleared when a dequeue
# transaction occurs; the full bit is set when the queue storage is
# empty and an enqueue transaction occurs
if s.reset: s.full.next = 0
elif s.deq_rdy and s.deq_val: s.full.next = 0
elif s.enq_rdy and s.enq_val: s.full.next = 1
else: s.full.next = s.full
#-----------------------------------------------------------------------
# SingleElementBypassQueue
#-----------------------------------------------------------------------
class SingleElementBypassQueue( Model ):
def __init__( s, dtype ):
s.enq = InValRdyBundle ( dtype )
s.deq = OutValRdyBundle( dtype )
# set high if full
s.full = OutPort( 1 )
# Ctrl and Dpath unit instantiation
s.ctrl = SingleElementBypassQueueCtrl ()
s.dpath = SingleElementBypassQueueDpath( dtype )
# Ctrl unit connections
s.connect( s.ctrl.enq_val, s.enq.val )
s.connect( s.ctrl.enq_rdy, s.enq.rdy )
s.connect( s.ctrl.deq_val, s.deq.val )
s.connect( s.ctrl.deq_rdy, s.deq.rdy )
s.connect( s.ctrl.full, s.full )
# Dpath unit connections
s.connect( s.dpath.enq_bits, s.enq.msg )
s.connect( s.dpath.deq_bits, s.deq.msg )
# Control Signal connections (ctrl -> dpath)
s.connect( s.dpath.wen, s.ctrl.wen )
s.connect( s.dpath.bypass_mux_sel, s.ctrl.bypass_mux_sel )
def line_trace( s ):
return "{} (v{},r{}) {}".format( s.enq, s.deq.val, s.deq.rdy, s.deq )
#-----------------------------------------------------------------------
# SingleElementBypassQueueDpath
#-----------------------------------------------------------------------
class SingleElementBypassQueueDpath( Model ):
def __init__( s, dtype ):
s.enq_bits = InPort ( dtype )
s.deq_bits = OutPort ( dtype )
# Control signal (ctrl -> dpath)
s.wen = InPort ( 1 )
s.bypass_mux_sel = InPort ( 1 )
# Queue storage
s.queue = RegEn( dtype )
s.connect( s.queue.en, s.wen )
s.connect( s.queue.in_, s.enq_bits )
# Bypass mux
s.bypass_mux = Mux( dtype, 2 )
s.connect( s.bypass_mux.in_[0], s.queue.out )
s.connect( s.bypass_mux.in_[1], s.enq_bits )
s.connect( s.bypass_mux.sel, s.bypass_mux_sel )
s.connect( s.bypass_mux.out, s.deq_bits )
#-----------------------------------------------------------------------
# SingleElementBypassQueueCtrl
#-----------------------------------------------------------------------
class SingleElementBypassQueueCtrl( Model ):
def __init__( s ):
s.enq_val = InPort ( 1 )
s.enq_rdy = OutPort ( 1 )
s.deq_val = OutPort ( 1 )
s.deq_rdy = InPort ( 1 )
# Control signal (ctrl -> dpath)
s.wen = OutPort ( 1 )
s.bypass_mux_sel = OutPort ( 1 )
# Full bit storage
s.full = OutPort( 1 )
# TODO: figure out how to make these work as temporaries
s.do_deq = Wire ( 1 )
s.do_enq = Wire ( 1 )
s.do_bypass = Wire ( 1 )
@s.combinational
def comb():
# bypass is always enabled when the queue is empty
s.bypass_mux_sel.value = ~s.full
# wen control signal: set the write enable signal if the storage queue
# is empty and a valid enqueue request is present
s.wen.value = ~s.full & s.enq_val
# enq_rdy signal is asserted when the single element queue storage is
# empty
s.enq_rdy.value = ~s.full
# deq_val signal is asserted when the single element queue storage is
# full or when the queue is empty but we are bypassing
s.deq_val.value = s.full | ( ~s.full & s.enq_val )
# TODO: figure out how to make these work as temporaries
# helper signals
s.do_deq.value = s.deq_rdy and s.deq_val
s.do_enq.value = s.enq_rdy and s.enq_val
s.do_bypass.value = ~s.full and s.do_deq and s.do_enq
@s.posedge_clk
def seq():
# TODO: can't use temporaries here, verilog simulation semantics
# don't match the Python semantics!
## helper signals
#do_deq = s.deq_rdy and s.deq_val
#do_enq = s.enq_rdy and s.enq_val
#do_bypass = ~s.full and do_deq and do_enq
# full bit calculation: the full bit is cleared when a dequeue
# transaction occurs; the full bit is set when the queue storage is
# empty and an enqueue transaction occurs and we are not bypassing
if s.reset: s.full.next = 0
elif s.do_deq: s.full.next = 0
elif s.do_enq and not s.do_bypass: s.full.next = 1
else: s.full.next = s.full
#-----------------------------------------------------------------------
# NormalQueue
#-----------------------------------------------------------------------
class NormalQueue( Model ):
def __init__( s, num_entries, dtype ):
s.enq = InValRdyBundle ( dtype )
s.deq = OutValRdyBundle( dtype )
s.num_free_entries = OutPort( get_nbits(num_entries) )
# Ctrl and Dpath unit instantiation
s.ctrl = NormalQueueCtrl ( num_entries )
s.dpath = NormalQueueDpath( num_entries, dtype )
# Ctrl unit connections
s.connect( s.ctrl.enq_val, s.enq.val )
s.connect( s.ctrl.enq_rdy, s.enq.rdy )
s.connect( s.ctrl.deq_val, s.deq.val )
s.connect( s.ctrl.deq_rdy, s.deq.rdy )
s.connect( s.ctrl.num_free_entries, s.num_free_entries )
# Dpath unit connections
s.connect( s.dpath.enq_bits, s.enq.msg )
s.connect( s.dpath.deq_bits, s.deq.msg )
# Control Signal connections (ctrl -> dpath)
s.connect( s.dpath.wen, s.ctrl.wen )
s.connect( s.dpath.waddr, s.ctrl.waddr )
s.connect( s.dpath.raddr, s.ctrl.raddr )
def line_trace( s ):
return "{} () {}".format( s.enq, s.deq )
#-----------------------------------------------------------------------
# NormalQueueDpath
#-----------------------------------------------------------------------
class NormalQueueDpath( Model ):
def __init__( s, num_entries, dtype ):
s.enq_bits = InPort ( dtype )
s.deq_bits = OutPort ( dtype )
# Control signal (ctrl -> dpath)
addr_nbits = clog2( num_entries )
s.wen = InPort ( 1 )
s.waddr = InPort ( addr_nbits )
s.raddr = InPort ( addr_nbits )
# Queue storage
s.queue = RegisterFile( dtype, num_entries )
# Connect queue storage
s.connect( s.queue.rd_addr[0], s.raddr )
s.connect( s.queue.rd_data[0], s.deq_bits )
s.connect( s.queue.wr_en, s.wen )
s.connect( s.queue.wr_addr, s.waddr )
s.connect( s.queue.wr_data, s.enq_bits )
#-----------------------------------------------------------------------
# NormalQueueCtrl
#-----------------------------------------------------------------------
class NormalQueueCtrl( Model ):
def __init__( s, num_entries ):
s.num_entries = num_entries
addr_nbits = clog2( num_entries )
# Interface Ports
s.enq_val = InPort ( 1 )
s.enq_rdy = OutPort ( 1 )
s.deq_val = OutPort ( 1 )
s.deq_rdy = InPort ( 1 )
s.num_free_entries = OutPort ( get_nbits( num_entries ) )
# Control signal (ctrl -> dpath)
s.wen = OutPort ( 1 )
s.waddr = OutPort ( addr_nbits )
s.raddr = OutPort ( addr_nbits )
# Wires
s.full = Wire ( 1 )
s.empty = Wire ( 1 )
s.do_enq = Wire ( 1 )
s.do_deq = Wire ( 1 )
s.enq_ptr = Wire ( addr_nbits )
s.deq_ptr = Wire ( addr_nbits )
s.enq_ptr_next = Wire ( addr_nbits )
s.deq_ptr_next = Wire ( addr_nbits )
# TODO: can't infer these temporaries due to if statement, fix
s.enq_ptr_inc = Wire ( addr_nbits )
s.deq_ptr_inc = Wire ( addr_nbits )
s.full_next_cycle = Wire ( 1 )
s.last_idx = num_entries - 1
@s.combinational
def comb():
# set output signals
s.empty.value = not s.full and (s.enq_ptr == s.deq_ptr)
s.enq_rdy.value = not s.full
s.deq_val.value = not s.empty
# only enqueue/dequeue if valid and ready
s.do_enq.value = s.enq_rdy and s.enq_val
s.do_deq.value = s.deq_rdy and s.deq_val
# set control signals
s.wen.value = s.do_enq
s.waddr.value = s.enq_ptr
s.raddr.value = s.deq_ptr
# enq ptr incrementer
if s.enq_ptr == s.last_idx: s.enq_ptr_inc.value = 0
else: s.enq_ptr_inc.value = s.enq_ptr + 1
# deq ptr incrementer
if s.deq_ptr == s.last_idx: s.deq_ptr_inc.value = 0
else: s.deq_ptr_inc.value = s.deq_ptr + 1
# set the next ptr value
if s.do_enq: s.enq_ptr_next.value = s.enq_ptr_inc
else: s.enq_ptr_next.value = s.enq_ptr
if s.do_deq: s.deq_ptr_next.value = s.deq_ptr_inc
else: s.deq_ptr_next.value = s.deq_ptr
# number of free entries calculation
if s.reset:
s.num_free_entries.value = s.num_entries
elif s.full:
s.num_free_entries.value = 0
elif s.empty:
s.num_free_entries.value = s.num_entries
elif s.enq_ptr > s.deq_ptr:
s.num_free_entries.value = s.num_entries - ( s.enq_ptr - s.deq_ptr )
elif s.deq_ptr > s.enq_ptr:
s.num_free_entries.value = s.deq_ptr - s.enq_ptr
s.full_next_cycle.value = (s.do_enq and not s.do_deq and
(s.enq_ptr_next == s.deq_ptr))
@s.posedge_clk
def seq():
if s.reset: s.deq_ptr.next = 0
else: s.deq_ptr.next = s.deq_ptr_next
if s.reset: s.enq_ptr.next = 0
else: s.enq_ptr.next = s.enq_ptr_next
if s.reset: s.full.next = 0
elif s.full_next_cycle: s.full.next = 1
elif (s.do_deq and s.full): s.full.next = 0
else: s.full.next = s.full
#-----------------------------------------------------------------------
# SingleElementPipelinedQueue
#-----------------------------------------------------------------------
class SingleElementPipelinedQueue( Model ):
def __init__( s, dtype ):
s.enq = InValRdyBundle ( dtype )
s.deq = OutValRdyBundle( dtype )
# Ctrl and Dpath unit instantiation
s.ctrl = SingleElementPipelinedQueueCtrl ()
s.dpath = SingleElementPipelinedQueueDpath( dtype )
# Ctrl unit connections
s.connect( s.ctrl.enq_val, s.enq.val )
s.connect( s.ctrl.enq_rdy, s.enq.rdy )
s.connect( s.ctrl.deq_val, s.deq.val )
s.connect( s.ctrl.deq_rdy, s.deq.rdy )
# Dpath unit connections
s.connect( s.dpath.enq_bits, s.enq.msg )
s.connect( s.dpath.deq_bits, s.deq.msg )
# Control Signal connections (ctrl -> dpath)
s.connect( s.dpath.wen, s.ctrl.wen )
def line_trace( s ):
return "{} () {}".format( s.enq, s.deq )
#-----------------------------------------------------------------------
# SingleElementPipelinedQueueDpath
#-----------------------------------------------------------------------
class SingleElementPipelinedQueueDpath( Model ):
def __init__( s, dtype ):
s.enq_bits = InPort ( dtype )
s.deq_bits = OutPort ( dtype )
# Control signal (ctrl -> dpath)
s.wen = InPort ( 1 )
# Queue storage
s.queue = RegEn( dtype )
# Connect queue storage
s.connect( s.queue.en, s.wen )
s.connect( s.queue.in_, s.enq_bits )
s.connect( s.queue.out, s.deq_bits )
#-----------------------------------------------------------------------
# SingleElementPipelinedQueueCtrl
#-----------------------------------------------------------------------
class SingleElementPipelinedQueueCtrl( Model ):
def __init__( s ):
# Interface Ports
s.enq_val = InPort ( 1 )
s.enq_rdy = OutPort ( 1 )
s.deq_val = OutPort ( 1 )
s.deq_rdy = InPort ( 1 )
# Control signal (ctrl -> dpath)
s.wen = OutPort ( 1 )
# Full bit storage
s.full = Wire ( 1 )
# Temporary Wires
s.do_enq = Wire( 1 )
s.do_deq = Wire( 1 )
s.do_pipe = Wire( 1 )
@s.combinational
def comb():
# enq_rdy signal is asserted when the single element queue storage is
# empty, or when it is full but a dequeue is occurring in the same
# cycle (pipelined behavior)
s.enq_rdy.value = ~s.full | ( s.full & s.deq_rdy )
# deq_val signal is asserted when the single element queue storage is
# full
s.deq_val.value = s.full
# wen control signal: set the write enable signal if a valid enqueue
# request is present and the storage is either empty or being dequeued
# in the same cycle
s.wen.value = ( ~s.full | ( s.full & s.deq_rdy ) ) & s.enq_val
# helper signals
s.do_deq.value = s.deq_rdy & s.deq_val
s.do_enq.value = s.enq_rdy & s.enq_val
s.do_pipe.value = s.full & s.do_deq & s.do_enq
@s.posedge_clk
def seq():
# full bit calculation: the full bit is cleared when a dequeue
# transaction occurs without a simultaneous enqueue (no pipelining);
# the full bit is set whenever an enqueue transaction occurs
if s.reset: s.full.next = 0
elif s.do_deq & ~s.do_pipe: s.full.next = 0
elif s.do_enq: s.full.next = 1
else: s.full.next = s.full
#-----------------------------------------------------------------------
# SingleElementSkidQueue
#-----------------------------------------------------------------------
class SingleElementSkidQueue( Model ):
'''Similar to the bypass queue, but saves the value even if it is bypassed.
Can dequeue and enqueue on the same clock edge.
'''
def __init__( s, dtype ):
s.enq = InValRdyBundle ( dtype )
s.deq = OutValRdyBundle( dtype )
# Ctrl and Dpath unit instantiation
s.ctrl = SingleElementSkidQueueCtrl ()
s.dpath = SingleElementSkidQueueDpath( dtype )
# Ctrl unit connections
s.connect( s.ctrl.enq_val, s.enq.val )
s.connect( s.ctrl.enq_rdy, s.enq.rdy )
s.connect( s.ctrl.deq_val, s.deq.val )
s.connect( s.ctrl.deq_rdy, s.deq.rdy )
# Dpath unit connections
s.connect( s.dpath.enq_bits, s.enq.msg )
s.connect( s.dpath.deq_bits, s.deq.msg )
# Control Signal connections (ctrl -> dpath)
s.connect( s.dpath.wen, s.ctrl.wen )
s.connect( s.dpath.bypass_mux_sel, s.ctrl.bypass_mux_sel )
def line_trace( s ):
return "{} ({}, {}) {}".format( s.enq,s.ctrl.do_bypass,s.enq.msg, s.deq )
#-----------------------------------------------------------------------
# SingleElementSkidQueueDpath
#-----------------------------------------------------------------------
class SingleElementSkidQueueDpath( Model ):
def __init__( s, dtype ):
s.enq_bits = InPort ( dtype )
s.deq_bits = OutPort ( dtype )
# Control signal (ctrl -> dpath)
s.wen = InPort ( 1 )
s.bypass_mux_sel = InPort ( 1 )
# Queue storage
s.queue = RegEn( dtype )
s.connect( s.queue.en, s.wen )
s.connect( s.queue.in_, s.enq_bits )
# Bypass mux
s.bypass_mux = Mux( dtype, 2 )
s.connect( s.bypass_mux.in_[0], s.queue.out )
s.connect( s.bypass_mux.in_[1], s.enq_bits )
s.connect( s.bypass_mux.sel, s.bypass_mux_sel )
s.connect( s.bypass_mux.out, s.deq_bits )
#-----------------------------------------------------------------------
# SingleElementSkidQueueCtrl
#-----------------------------------------------------------------------
class SingleElementSkidQueueCtrl( Model ):
def __init__( s ):
s.enq_val = InPort ( 1 )
s.enq_rdy = OutPort ( 1 )
s.deq_val = OutPort ( 1 )
s.deq_rdy = InPort ( 1 )
# Control signal (ctrl -> dpath)
s.wen = OutPort ( 1 )
s.bypass_mux_sel = OutPort ( 1 )
# Full bit storage
s.full = Wire ( 1 )
# TODO: figure out how to make these work as temporaries
s.do_deq = Wire ( 1 )
s.do_enq = Wire ( 1 )
s.do_bypass = Wire ( 1 )
@s.combinational
def comb():
#Dequeue is valid if the queue has an element or is bypassing
s.deq_val.value = s.full | ( ~s.full & s.enq_val )
#Dequeue only if the sink is ready and the deq is valid
s.do_deq.value = s.deq_rdy and s.deq_val
# Queue can take a new element if the queue is empty or if the
# queue is dequeuing this cycle
s.enq_rdy.value = ~s.full | s.do_deq
s.do_enq.value = s.enq_rdy and s.enq_val
s.wen.value = s.do_enq
s.do_bypass.value = s.do_deq and s.do_enq
s.bypass_mux_sel.value = s.do_bypass
@s.posedge_clk
def seq():
# TODO: can't use temporaries here, verilog simulation semantics
# don't match the Python semantics!
## helper signals
#do_deq = s.deq_rdy and s.deq_val
#do_enq = s.enq_rdy and s.enq_val
#do_bypass = ~s.full and do_deq and do_enq
# full bit calculation: the full bit is cleared when a dequeue
# transaction occurs and a new enqueue is not happening; the full
# bit is set whenever an enqueue transaction occurs (even when the
# element is bypassed, since the skid queue still saves the value)
if s.reset: s.full.next = 0
elif s.do_deq and not s.do_enq: s.full.next = 0
elif s.do_enq: s.full.next = 1
else: s.full.next = s.full
#-------------------------------------------------------------------------
# TwoElementBypassQueuePRTL.py
#-------------------------------------------------------------------------
# FIXME: This is just two single-element bypass queues cascaded together. We
# definitely need a better implementation.
class TwoElementBypassQueue( Model ):
def __init__( s, dtype ):
s.enq = InValRdyBundle ( dtype )
s.deq = OutValRdyBundle( dtype )
s.full = OutPort( 1 )
s.empty = OutPort( 1 )
s.queue0 = SingleElementBypassQueue( dtype )
s.queue1 = SingleElementBypassQueue( dtype )
s.connect_pairs( s.enq, s.queue0.enq,
s.queue0.deq, s.queue1.enq,
s.queue1.deq, s.deq
)
@s.combinational
def full_empty():
s.full.value = s.queue0.full & s.queue1.full
s.empty.value = (~s.queue0.full) & (~s.queue1.full)
def line_trace( s ):
return "{} (v{},r{}) {}".format( s.enq, s.deq.val, s.deq.rdy, s.deq )
|
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import urllib, base64
import time
import boto.utils
from boto.connection import AWSAuthConnection
from boto import handler
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.resultset import ResultSet
from boto.exception import BotoClientError
def check_lowercase_bucketname(n):
"""
Bucket names must not contain uppercase characters. We check for
this by appending a lowercase character and testing with islower().
Note this also covers cases like numeric bucket names with dashes.
>>> check_lowercase_bucketname("Aaaa")
Traceback (most recent call last):
...
BotoClientError: S3Error: Bucket names cannot contain upper-case
characters when using either the sub-domain or virtual hosting calling
format.
>>> check_lowercase_bucketname("1234-5678-9123")
True
>>> check_lowercase_bucketname("abcdefg1234")
True
"""
if not (n + 'a').islower():
raise BotoClientError("Bucket names cannot contain upper-case " \
"characters when using either the sub-domain or virtual " \
"hosting calling format.")
return True
def assert_case_insensitive(f):
def wrapper(*args, **kwargs):
if len(args) == 3 and check_lowercase_bucketname(args[2]):
pass
return f(*args, **kwargs)
return wrapper
class _CallingFormat(object):
def get_bucket_server(self, server, bucket):
return ''
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '%s://' % protocol
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
def build_host(self, server, bucket):
if bucket == '':
return server
else:
return self.get_bucket_server(server, bucket)
def build_auth_path(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path = ''
if bucket != '':
path = '/' + bucket
return path + '/%s' % urllib.quote(key)
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
return '/%s' % urllib.quote(key)
class SubdomainCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return '%s.%s' % (bucket, server)
class VHostCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return bucket
class OrdinaryCallingFormat(_CallingFormat):
def get_bucket_server(self, server, bucket):
return server
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path_base = '/'
if bucket:
path_base += "%s/" % bucket
return path_base + urllib.quote(key)
class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '//'
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
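# Hedged illustration (editor's addition, not part of the original boto
# module) of how the calling formats above compose hosts and paths; the
# bucket and key names are purely illustrative.
def _calling_format_demo():
    bucket, key = 'examplebucket', 'photos/cat.jpg'
    subdomain = SubdomainCallingFormat()
    ordinary = OrdinaryCallingFormat()
    # Sub-domain style: the bucket becomes part of the host name.
    assert subdomain.get_bucket_server('s3.amazonaws.com', bucket) == \
        'examplebucket.s3.amazonaws.com'
    assert subdomain.build_path_base(bucket, key) == '/photos/cat.jpg'
    # Ordinary (path) style: the bucket stays in the request path instead.
    assert ordinary.get_bucket_server('s3.amazonaws.com', bucket) == \
        's3.amazonaws.com'
    assert ordinary.build_path_base(bucket, key) == '/examplebucket/photos/cat.jpg'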
class Location:
DEFAULT = '' # US Classic Region
EU = 'EU'
USWest = 'us-west-1'
USWest2 = 'us-west-2'
SAEast = 'sa-east-1'
APNortheast = 'ap-northeast-1'
APSoutheast = 'ap-southeast-1'
class S3Connection(AWSAuthConnection):
DefaultHost = 's3.amazonaws.com'
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
calling_format=SubdomainCallingFormat(), path='/',
provider='aws', bucket_class=Bucket, security_token=None,
suppress_consec_slashes=True, anon=False):
self.calling_format = calling_format
self.bucket_class = bucket_class
self.anon = anon
AWSAuthConnection.__init__(self, host,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
debug=debug, https_connection_factory=https_connection_factory,
path=path, provider=provider, security_token=security_token,
suppress_consec_slashes=suppress_consec_slashes)
def _required_auth_capability(self):
if self.anon:
return ['anon']
else:
return ['s3']
def __iter__(self):
for bucket in self.get_all_buckets():
yield bucket
def __contains__(self, bucket_name):
return not (self.lookup(bucket_name) is None)
def set_bucket_class(self, bucket_class):
"""
Set the Bucket class associated with this connection. By default, this
is the boto.s3.bucket.Bucket class, but if you want to subclass that
for some reason this allows you to associate your new class instead.
:type bucket_class: class
:param bucket_class: A subclass of Bucket that can be more specific
"""
self.bucket_class = bucket_class
def build_post_policy(self, expiration_time, conditions):
"""
Taken from the AWS book Python examples and modified for use with boto
"""
assert type(expiration_time) == time.struct_time, \
'Policy document must include a valid expiration Time object'
# Convert conditions object mappings to condition statements
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
def build_post_form_args(self, bucket_name, key, expires_in = 6000,
acl = None, success_action_redirect = None,
max_content_length = None,
http_method = "http", fields=None,
conditions=None):
"""
Taken from the AWS book Python examples and modified for use with boto.
This only returns the arguments required for the post form, not the
actual form. It does not return the file input field, which also
needs to be added.
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: :class:`boto.s3.acl.ACL`
:param acl: ACL rule to use, if any
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
.. code-block:: python
{
"action": action_url_to_post_to,
"fields": [
{
"name": field_name,
"value": field_value
},
{
"name": field_name2,
"value": field_value2
}
]
}
"""
if fields is None:
fields = []
if conditions is None:
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({ "name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({ "name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length})
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId",
"value": self.aws_access_key_id})
# Add the signature for the encoded policy document as the 'signature' field
signature = self._auth_handler.sign_string(policy_b64)
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
# The URL scheme is taken from the http_method argument ('http' or 'https').
url = '%s://%s/' % (http_method,
self.calling_format.build_host(self.server_name(),
bucket_name))
return {"action": url, "fields": fields}
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None):
headers = headers or {}
if expires_in_absolute:
expires = int(expires_in)
else:
expires = int(time.time() + expires_in)
auth_path = self.calling_format.build_auth_path(bucket, key)
auth_path = self.get_path(auth_path)
# optional version_id and response_headers need to be added to
# the query param list.
extra_qp = []
if version_id is not None:
extra_qp.append("versionId=%s" % version_id)
if response_headers:
for k, v in response_headers.items():
extra_qp.append("%s=%s" % (k, urllib.quote(v)))
if extra_qp:
delimiter = '?' if '?' not in auth_path else '&'
auth_path += delimiter + '&'.join(extra_qp)
c_string = boto.utils.canonical_string(method, auth_path, headers,
expires, self.provider)
b64_hmac = self._auth_handler.sign_string(c_string)
encoded_canonical = urllib.quote_plus(b64_hmac)
self.calling_format.build_path_base(bucket, key)
if query_auth:
query_part = '?' + self.QueryString % (encoded_canonical, expires,
self.aws_access_key_id)
else:
query_part = ''
if headers:
hdr_prefix = self.provider.header_prefix
for k, v in headers.items():
if k.startswith(hdr_prefix):
# headers used for sig generation must be
# included in the url also.
extra_qp.append("%s=%s" % (k, urllib.quote(v)))
if extra_qp:
delimiter = '?' if not query_part else '&'
query_part += delimiter + '&'.join(extra_qp)
if force_http:
protocol = 'http'
port = 80
else:
protocol = self.protocol
port = self.port
return self.calling_format.build_url_base(self, protocol,
self.server_name(port),
bucket, key) + query_part
def get_all_buckets(self, headers=None):
response = self.make_request('GET', headers=headers)
body = response.read()
if response.status > 300:
raise self.provider.storage_response_error(
response.status, response.reason, body)
rs = ResultSet([('Bucket', self.bucket_class)])
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
def get_canonical_user_id(self, headers=None):
"""
Convenience method that returns the "CanonicalUserID" of the
user whose credentials are associated with the connection.
The only way to get this value is to do a GET request on the
service which returns all buckets associated with the account.
As part of that response, the canonical userid is returned.
This method simply does all of that and then returns just the
user id.
:rtype: string
:return: A string containing the canonical user id.
"""
rs = self.get_all_buckets(headers=headers)
return rs.ID
def get_bucket(self, bucket_name, validate=True, headers=None):
bucket = self.bucket_class(self, bucket_name)
if validate:
bucket.get_all_keys(headers, maxkeys=0)
return bucket
def lookup(self, bucket_name, validate=True, headers=None):
try:
bucket = self.get_bucket(bucket_name, validate, headers=headers)
except:
bucket = None
return bucket
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None):
"""
Creates a new bucket in the given location. By default it is created in
the US Classic region. You can pass Location.EU to create a European bucket.
:type bucket_name: string
:param bucket_name: The name of the new bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to AWS.
:type location: :class:`boto.s3.connection.Location`
:param location: The location of the new bucket
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new bucket in S3.
"""
check_lowercase_bucketname(bucket_name)
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header : policy}
if location == Location.DEFAULT:
data = ''
else:
data = '<CreateBucketConfiguration><LocationConstraint>' + \
location + '</LocationConstraint></CreateBucketConfiguration>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def delete_bucket(self, bucket, headers=None):
response = self.make_request('DELETE', bucket, headers=headers)
body = response.read()
if response.status != 204:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def make_request(self, method, bucket='', key='', headers=None, data='',
query_args=None, sender=None, override_num_retries=None):
if isinstance(bucket, self.bucket_class):
bucket = bucket.name
if isinstance(key, Key):
key = key.name
path = self.calling_format.build_path_base(bucket, key)
boto.log.debug('path=%s' % path)
auth_path = self.calling_format.build_auth_path(bucket, key)
boto.log.debug('auth_path=%s' % auth_path)
host = self.calling_format.build_host(self.server_name(), bucket)
if query_args:
path += '?' + query_args
boto.log.debug('path=%s' % path)
auth_path += '?' + query_args
boto.log.debug('auth_path=%s' % auth_path)
return AWSAuthConnection.make_request(self, method, path, headers,
data, host, auth_path, sender,
override_num_retries=override_num_retries)
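# Hedged usage sketch (editor's addition, not part of the original module).
# The credentials, bucket and key names below are placeholders; real calls
# need valid AWS credentials and network access.
def _s3_connection_demo():
    conn = S3Connection(aws_access_key_id='AKIA...EXAMPLE',
                        aws_secret_access_key='secret-example')
    # Create a bucket with an EU location constraint (see create_bucket above).
    bucket = conn.create_bucket('examplebucket-eu', location=Location.EU)
    # Pre-signed GET URL, valid for one hour, built through the configured
    # calling format (SubdomainCallingFormat by default).
    url = conn.generate_url(3600, 'GET', bucket='examplebucket-eu',
                            key='photos/cat.jpg')
    # Browser-upload form fields for a POST policy that expires in 10 minutes.
    form = conn.build_post_form_args('examplebucket-eu', 'uploads/${filename}',
                                     expires_in=600, acl='public-read')
    return bucket, url, form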
|
|
import time, logging
from .exceptions import TimeoutError, ElementNotFoundError
__all__ = ['PageObject', 'get_page_object', 'cacheable']
_logger = logging.getLogger(__name__)
_page_singletons = {} # cache
def get_page_object(page_class, context):
fqcn = '%s.%s' % (page_class.__module__, page_class.__name__)
_logger.debug('Get page object; FQCN = %s', fqcn)
if fqcn in _page_singletons:
cache = _page_singletons[fqcn]
if cache._context is context:
_logger.debug('Cached, and the context remains unchanged.')
cache._invalidate_elements_cache()
return cache
else:
_logger.debug('Cached, but the context is invalid.')
page = page_class(context)
_page_singletons[fqcn] = page
return page
def _is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def _to_iterable(obj):
return obj if _is_iterable(obj) else (obj,)
_NOT_FOUND_EXCEPTIONS = (ElementNotFoundError,)
_ELEMENTS_CACHE_ATTR = '_pyuia_elements_cache'
def cacheable(lookup, cache_none=True):
def func(self):
if not hasattr(self, _ELEMENTS_CACHE_ATTR):
setattr(self, _ELEMENTS_CACHE_ATTR, {}) # {callable_id: element(s)}
cache = getattr(self, _ELEMENTS_CACHE_ATTR)
key = id(lookup)
if key not in cache:
result = lookup(self)
if result is None and not cache_none: return
cache[key] = result
return cache[key]
return func
class PageObject(object):
_WAIT_INTERVAL = 0
_WARN_TIMEOUT = 5
_WAIT_TIMEOUT = 10
_PAGE_WARN_TIMEOUT = 5
_PAGE_WAIT_TIMEOUT = 10
def __init__(self, context, not_found_exceptions=None):
self._context = context
exceptions = list(_NOT_FOUND_EXCEPTIONS)
if not_found_exceptions:
try:
exceptions.extend(not_found_exceptions)
except TypeError: # not iterable
exceptions.append(not_found_exceptions)
self._not_found_exceptions = tuple(exceptions)
exceptions.append(AssertionError)
self._page_assertion_exceptions = tuple(exceptions)
def _go_to(self, page_class):
"""Instantiate a page object."""
page = get_page_object(page_class, self._context)
page._from_page_class = self.__class__
page.wait_for_page_loaded(self.__class__)
return page
def _back_to(self, page_class=None):
if not page_class:
if not hasattr(self, '_from_page_class'):
raise RuntimeError("_back_to(page_class) don't know where to go. You can explicitly specify "
"'page_class' or implement page transition with _go_to(page_class).")
page_class = self._from_page_class
page = get_page_object(page_class, self._context)
page.wait_for_page_loaded(self.__class__)
return page
def _invalidate_elements_cache(self):
if hasattr(self, _ELEMENTS_CACHE_ATTR):
delattr(self, _ELEMENTS_CACHE_ATTR)
def wait_for_page_loaded(self, from_page_class=None, timeout_warn=None, timeout=None):
timeout_warn = timeout_warn or self._PAGE_WARN_TIMEOUT
timeout = timeout or self._PAGE_WAIT_TIMEOUT
start_time = time.time()
timeout_warn = start_time + timeout_warn
timeout = start_time + timeout
handlers = self._get_page_entry_handlers(from_page_class)
warned = False
while True:
try:
self._invalidate_elements_cache()
self.assert_on_this_page(from_page_class)
break
except self._page_assertion_exceptions:
if not warned and time.time() > timeout_warn:
self._log_screenshot(
'Wait for page loaded. Time elapsed = [%s]s.',
time.time() - start_time,
level=logging.WARN)
warned = True
handlers = self._consult_handlers(handlers)
time.sleep(self._WAIT_INTERVAL)
if time.time() > timeout: raise
self._log_screenshot('Already on the page.')
# _on_page_entry() returns True to indicate the UI changed.
if self._on_page_entry(from_page_class):
self._log_screenshot('Page loaded.')
return self
def _get_page_entry_handlers(self, from_page_class): pass
def assert_on_this_page(self, from_page_class): pass
def _on_page_entry(self, from_page_class):
"""To put the page in a known state."""
pass
def _log_screenshot(self, msg, *args, **kwargs):
kwargs['page'] = self
self._context.log_screenshot(msg, *args, **kwargs)
def _log_page_source(self, msg, *args, **kwargs):
kwargs['page'] = self
self._context.log_page_source(msg, *args, **kwargs)
def _assert_present(self, locators, check_visibility=False):
single_loc = not _is_iterable(locators)
locators = _to_iterable(locators)
elements = []
for locator in locators:
try:
element = locator()
except self._not_found_exceptions as e:
_logger.debug(
'Assert ALL present. The locator (%s) did not resolve to '
'an element.', locator)
element = None
if not element:
assert False, locator # None or empty sequence
if check_visibility and not self._is_displayed(element):
assert False, locator
elements.append(element)
return elements[0] if single_loc else elements
def _assert_visible(self, locators):
return self._assert_present(locators, check_visibility=True)
def _assert_any_present(self, locators, check_visibility=False):
locators = _to_iterable(locators)
for locator in locators:
try:
element = locator()
except self._not_found_exceptions as e:
_logger.debug(
'Assert ANY present. The locator (%s) did not resolve to '
'an element.', locator)
element = None
if not element: continue # None or empty sequence
if check_visibility and not self._is_displayed(element): continue
return element
assert False, locators
def _assert_any_visible(self, locators):
return self._assert_any_present(locators, check_visibility=True)
def _wait_present(self, locators, timeout_warn=None, handlers=None,
timeout=None, check_visibility=False):
timeout_warn = timeout_warn or self._WARN_TIMEOUT
timeout = timeout or self._WAIT_TIMEOUT
start_time = time.time()
timeout_warn = start_time + timeout_warn
timeout = start_time + timeout
single_loc = not _is_iterable(locators)
locators = _to_iterable(locators)
warned = False
while True:
elements = []
for locator in locators:
try:
element = locator()
except self._not_found_exceptions as e:
_logger.debug(
'Wait ALL present. The locator (%s) did not resolve to '
'an element.', locator)
element = None
if not element: break # None or empty sequence
if check_visibility and not self._is_displayed(element): break
elements.append(element)
if len(elements) == len(locators):
return elements[0] if single_loc else elements
if not warned and time.time() > timeout_warn:
self._log_screenshot(
'Wait ALL elements to be present. locators = %s, '
'check_visibility = [%s], time elapsed = [%s]s.',
locators, check_visibility, time.time() - start_time,
level=logging.WARN)
warned = True
handlers = self._consult_handlers(handlers)
time.sleep(self._WAIT_INTERVAL)
if time.time() > timeout:
raise TimeoutError(
'Wait ALL elements to be present. locators = %s, '
'check_visibility = [%s], time elapsed = [%s]s.' %
(locators, check_visibility, time.time() - start_time))
def _wait_visible(self, locators, timeout_warn=None, handlers=None, timeout=None):
return self._wait_present(locators, timeout_warn, handlers, timeout, check_visibility=True)
def _wait_any_present(self, locators, timeout_warn=None, handlers=None,
timeout=None, check_visibility=False):
timeout_warn = timeout_warn or self._WARN_TIMEOUT
timeout = timeout or self._WAIT_TIMEOUT
start_time = time.time()
timeout_warn = start_time + timeout_warn
timeout = start_time + timeout
locators = _to_iterable(locators)
warned = False
while True:
for locator in locators:
try:
element = locator()
except self._not_found_exceptions as e:
_logger.debug(
'Wait ANY present. The locator (%s) did not resolve to '
'an element.', locator)
element = None
if not element: continue # None or empty sequence
if check_visibility and not self._is_displayed(element): continue
return element
if not warned and time.time() > timeout_warn:
self._log_screenshot(
'Wait ANY present. locators = %s, time elapsed = [%s]s.',
locators, time.time() - start_time, level=logging.WARN)
warned = True
handlers = self._consult_handlers(handlers)
time.sleep(self._WAIT_INTERVAL)
if time.time() > timeout:
raise TimeoutError(
'Wait ANY elements to be present. locators = %s, '
'check_visibility = [%s], time elapsed = [%s]s.' %
(locators, check_visibility, time.time() - start_time))
def _wait_any_visible(self, locators, timeout_warn=None, handlers=None,
timeout=None):
return self._wait_any_present(locators, timeout_warn, handlers, timeout, check_visibility=True)
def _wait_absent(self, locators, timeout_warn=None, minwait=3,
handlers=None, timeout=None, check_visibility_only=False):
timeout_warn = timeout_warn or self._WARN_TIMEOUT
timeout = timeout or self._WAIT_TIMEOUT
start_time = time.time()
timeout_appear = start_time + minwait
timeout_warn = start_time + timeout_warn
timeout = start_time + timeout
locators = _to_iterable(locators)
warned = False
while True:
# consult handlers first, to avoid treating elements as absent simply
# because other elements, such as error dialogs, are being displayed.
handlers = self._consult_handlers(handlers)
any_invalid = False
for locator in locators:
try:
element = locator()
except self._not_found_exceptions as e:
_logger.debug(
'Wait ALL absent. The locator (%s) did not resolve to '
'an element.', locator)
element = None
if not element: continue
if not check_visibility_only or self._is_displayed(element):
any_invalid = True
break
# wait for at least 'minwait' seconds to make sure the target
# element(s) do not show up belatedly.
if not any_invalid and time.time() > timeout_appear: return
if not warned and time.time() > timeout_warn:
self._log_screenshot(
'Wait ALL elements to be absent. locators = %s, '
'check_visibility_only = [%s], time elapsed = [%s]s.',
locators, check_visibility_only, time.time() - start_time,
level=logging.WARN)
warned = True
time.sleep(self._WAIT_INTERVAL)
if time.time() > timeout:
raise TimeoutError(
'Wait ALL elements to be absent. locators = %s, '
'check_visibility_only = [%s], time elapsed = [%s]s.' %
(locators, check_visibility_only, time.time() - start_time))
def _wait_invisible(self, locators, timeout_warn=None, minwait=3,
handlers=None, timeout=None):
self._wait_absent(locators, timeout_warn, minwait, handlers, timeout, check_visibility_only=True)
def _watch(self, handlers, max_duration=5):
timeout = time.time() + max_duration
while True:
handlers = self._consult_handlers(handlers)
if not handlers: break
if time.time() > timeout: break
def _consult_handlers(self, handlers):
if not handlers: return
# convert handlers to a mutable list of (locator, handler)
if isinstance(handlers, dict):
handlers = handlers.items()
handlers = list(handlers)
_logger.debug('Consult handlers. handlers = %s.', [h[0] for h in handlers])
# consult a handler at a time (rotation)
locator, handler = handlers[0]
try:
element = locator()
except self._not_found_exceptions as e:
_logger.debug('The locator (%s) did not resolve to an element.', locator)
element = None
# consult the handler again later, or drop it.
del handlers[0]
if not element or not self._is_displayed(element) or handler(element):
handlers.append((locator, handler))
_logger.debug('Rotated/modified handlers: %s', [h[0] for h in handlers])
return handlers
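# Hedged usage sketch (editor's addition, not part of the original module).
# It shows the intended shape of a concrete page object: locators wrapped in
# 'cacheable', an 'assert_on_this_page' override consulted by
# wait_for_page_loaded(), and the wait/assert helpers. The driver call
# 'self._context.find_element', its selectors, and 'send_keys'/'click' are
# hypothetical names, not part of this module.
class _ExampleLoginPage(PageObject):
    @cacheable
    def _username_field(self):
        # looked up once per page-object refresh, then served from the cache
        return self._context.find_element('#username')  # hypothetical API
    @cacheable
    def _login_button(self):
        return self._context.find_element('#login')     # hypothetical API
    def assert_on_this_page(self, from_page_class):
        # wait_for_page_loaded() retries this until it stops raising
        self._assert_visible([self._username_field, self._login_button])
    def log_in(self, user):
        # wait until the form is interactable, then drive it
        field = self._wait_visible(self._username_field)
        field.send_keys(user)                            # hypothetical API
        self._login_button().click()                     # hypothetical API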
|
|
"""
Collection class which keeps itself sorted.
Taken from http://code.activestate.com/recipes/577197-sortedcollection/,
with some changes.
"""
from bisect import bisect_left, bisect_right
class SortedCollection(object):
'''Sequence sorted by a key function.
SortedCollection() is much easier to work with than using bisect() directly.
It supports key functions like those used in sorted(), min(), and max().
The result of the key function call is saved so that keys can be searched
efficiently.
Instead of returning an insertion-point which can be hard to interpret, the
five find-methods return a specific item in the sequence. They can scan for
exact matches, the last item less-than-or-equal to a key, or the first item
greater-than-or-equal to a key.
Once found, an item's ordinal position can be located with the index() method.
New items can be added with the insert() and insert_right() methods.
Old items can be deleted with the remove() method.
The usual sequence methods are provided to support indexing, slicing,
length lookup, clearing, copying, forward and reverse iteration, contains
checking, item counts, item removal, and a nice looking repr.
Finding and indexing are O(log n) operations while iteration and insertion
are O(n). The initial sort is O(n log n).
The key function is stored in the 'key' attribute for easy introspection or
so that you can assign a new key function (triggering an automatic re-sort).
In short, the class was designed to handle all of the common use cases for
bisect but with a simpler API and support for key functions.
>>> from pprint import pprint
>>> from operator import itemgetter
>>> s = SortedCollection(key=itemgetter(2))
>>> for record in [
... ('roger', 'young', 30),
... ('angela', 'jones', 28),
... ('bill', 'smith', 22),
... ('david', 'thomas', 32)]:
... s.insert(record)
>>> pprint(list(s)) # show records sorted by age
[('bill', 'smith', 22),
('angela', 'jones', 28),
('roger', 'young', 30),
('david', 'thomas', 32)]
>>> s.find_le(29) # find oldest person aged 29 or younger
('angela', 'jones', 28)
>>> s.find_lt(28) # find oldest person under 28
('bill', 'smith', 22)
>>> s.find_gt(28) # find youngest person over 28
('roger', 'young', 30)
>>> r = s.find_ge(32) # find youngest person aged 32 or older
>>> s.index(r) # get the index of their record
3
>>> s[3] # fetch the record at that index
('david', 'thomas', 32)
>>> s.key = itemgetter(0) # now sort by first name
>>> pprint(list(s))
[('angela', 'jones', 28),
('bill', 'smith', 22),
('david', 'thomas', 32),
('roger', 'young', 30)]
'''
def __init__(self, iterable=(), key=None):
self._given_key = key
key = (lambda x: x) if key is None else key
decorated = sorted((key(item), item) for item in iterable)
self._keys = [k for k, item in decorated]
self._items = [item for k, item in decorated]
self._key = key
def _getkey(self):
return self._key
def _setkey(self, key):
if key is not self._key:
self.__init__(self._items, key=key)
def _delkey(self):
self._setkey(None)
key = property(_getkey, _setkey, _delkey, 'key function')
def clear(self):
self.__init__([], self._key)
def copy(self):
return self.__class__(self, self._key)
def __len__(self):
return len(self._items)
def __getitem__(self, i):
return self._items[i]
def __iter__(self):
return iter(self._items)
def __reversed__(self):
return reversed(self._items)
def __repr__(self):
return '%s(%r, key=%s)' % (
self.__class__.__name__,
self._items,
getattr(self._given_key, '__name__', repr(self._given_key))
)
def __reduce__(self):
return self.__class__, (self._items, self._given_key)
def __contains__(self, item):
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return item in self._items[i:j]
def index(self, item):
'Find the position of an item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].index(item) + i
def count(self, item):
'Return number of occurrences of item'
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].count(item)
def insert(self, item):
'Insert a new item. If equal keys are found, add to the left'
k = self._key(item)
i = bisect_left(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def insert_right(self, item):
'Insert a new item. If equal keys are found, add to the right'
k = self._key(item)
i = bisect_right(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def remove(self, item):
'Remove first occurrence of item. Raise ValueError if not found'
i = self.index(item)
del self._keys[i]
del self._items[i]
def find(self, k):
'Return first item with a key == k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i != len(self) and self._keys[i] == k:
return self._items[i]
raise ValueError('No item found with key equal to: %r' % (k,))
def find_le(self, k):
'Return last item with a key <= k. Raise ValueError if not found.'
i = bisect_right(self._keys, k)
if i:
return self._items[i-1]
raise ValueError('No item found with key at or below: %r' % (k,))
def find_lt(self, k):
'Return last item with a key < k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i:
return self._items[i-1]
raise ValueError('No item found with key below: %r' % (k,))
def argfind_ge(self, k):
'Return the index of the first item with a key >= k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i != len(self):
return i
raise ValueError('No item found with key at or above: %r' % (k,))
def find_ge(self, k):
'Return first item with a key >= k. Raise ValueError if not found.'
return self._items[self.argfind_ge(k)]
def find_gt(self, k):
'Return first item with a key > k. Raise ValueError if not found'
i = bisect_right(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key above: %r' % (k,))
# --------------------------- Simple demo and tests -------------------------
if __name__ == '__main__':
def ve2no(f, *args):
'Convert ValueError result to -1'
try:
return f(*args)
except ValueError:
return -1
def slow_index(seq, k):
'Location of match or -1 if not found'
for i, item in enumerate(seq):
if item == k:
return i
return -1
def slow_find(seq, k):
'First item with a key equal to k. -1 if not found'
for item in seq:
if item == k:
return item
return -1
def slow_find_le(seq, k):
'Last item with a key less-than or equal to k.'
for item in reversed(seq):
if item <= k:
return item
return -1
def slow_find_lt(seq, k):
'Last item with a key less-than k.'
for item in reversed(seq):
if item < k:
return item
return -1
def slow_find_ge(seq, k):
'First item with a key-value greater-than or equal to k.'
for item in seq:
if item >= k:
return item
return -1
def slow_find_gt(seq, k):
'First item with a key-value greater-than k.'
for item in seq:
if item > k:
return item
return -1
from random import choice
pool = [1.5, 2, 2.0, 3, 3.0, 3.5, 4, 4.0, 4.5]
for i in range(500):
for n in range(6):
s = [choice(pool) for i in range(n)]
sc = SortedCollection(s)
s.sort()
for probe in pool:
assert repr(ve2no(sc.index, probe)) == repr(slow_index(s, probe))
assert repr(ve2no(sc.find, probe)) == repr(slow_find(s, probe))
assert repr(ve2no(sc.find_le, probe)) == repr(slow_find_le(s, probe))
assert repr(ve2no(sc.find_lt, probe)) == repr(slow_find_lt(s, probe))
assert repr(ve2no(sc.find_ge, probe)) == repr(slow_find_ge(s, probe))
assert repr(ve2no(sc.find_gt, probe)) == repr(slow_find_gt(s, probe))
for i, item in enumerate(s):
assert repr(item) == repr(sc[i]) # test __getitem__
assert item in sc # test __contains__ and __iter__
assert s.count(item) == sc.count(item) # test count()
assert len(sc) == n # test __len__
assert list(map(repr, reversed(sc))) == list(map(repr, reversed(s))) # test __reversed__
assert list(sc.copy()) == list(sc) # test copy()
sc.clear() # test clear()
assert len(sc) == 0
sd = SortedCollection('The quick Brown Fox jumped'.split(), key=str.lower)
assert sd._keys == ['brown', 'fox', 'jumped', 'quick', 'the']
assert sd._items == ['Brown', 'Fox', 'jumped', 'quick', 'The']
assert sd._key == str.lower
assert repr(sd) == "SortedCollection(['Brown', 'Fox', 'jumped', 'quick', 'The'], key=lower)"
sd.key = str.upper
assert sd._key == str.upper
assert len(sd) == 5
assert list(reversed(sd)) == ['The', 'quick', 'jumped', 'Fox', 'Brown']
for item in sd:
assert item in sd
for i, item in enumerate(sd):
assert item == sd[i]
sd.insert('jUmPeD')
sd.insert_right('QuIcK')
assert sd._keys ==['BROWN', 'FOX', 'JUMPED', 'JUMPED', 'QUICK', 'QUICK', 'THE']
assert sd._items == ['Brown', 'Fox', 'jUmPeD', 'jumped', 'quick', 'QuIcK', 'The']
assert sd.find_le('JUMPED') == 'jumped', sd.find_le('JUMPED')
assert sd.find_ge('JUMPED') == 'jUmPeD'
assert sd.find_le('GOAT') == 'Fox'
assert sd.find_ge('GOAT') == 'jUmPeD'
assert sd.find('FOX') == 'Fox'
assert sd[3] == 'jumped'
assert sd[3:5] ==['jumped', 'quick']
assert sd[-2] == 'QuIcK'
assert sd[-4:-2] == ['jumped', 'quick']
for i, item in enumerate(sd):
assert sd.index(item) == i
try:
sd.index('xyzpdq')
except ValueError:
pass
else:
assert 0, 'Oops, failed to notify of missing value'
sd.remove('jumped')
assert list(sd) == ['Brown', 'Fox', 'jUmPeD', 'quick', 'QuIcK', 'The']
import doctest
from operator import itemgetter
print(doctest.testmod())
|
|
#! /usr/bin/python
import asyncore
import pdb
import select
import sys
import multiprocessing
from websocket import create_connection, WebSocketException
import json
from PIL import Image
import base64
import numpy as np
import io, StringIO
import MyUtils
import threading
import time
import os
import logging
from NetworkProtocol import *
# recv() is called synchronously: every request's response comes back
# through recv() and is returned directly to the caller.
class OpenFaceClient(object):
def __init__(self, server_ip=u"ws://localhost", server_port=9000):
self.logger=MyUtils.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
server_ip_port = server_ip + ':' +str(server_port)
self.ws=create_connection(server_ip_port)
# self.receive_thread = None
# self.receive_thread_running = None
#
# self.async=async
# if (self.async):
# self.receive_thread = threading.Thread(target=self.onReceive, name='receive_thread', args=(async_callback,))
# self.receive_thread_running = threading.Event()
# self.receive_thread_running.set()
# self.receive_thread.start()
# if not self.async:
def recv(self):
try:
resp = self.ws.recv()
return resp
except WebSocketException as e:
self.logger.debug("web socket error: {0}".format(e))
def addPerson(self,person):
msg = {
'type': FaceRecognitionServerProtocol.TYPE_add_person,
'val': person
}
msg = json.dumps(msg)
self.ws.send(msg)
def setTraining(self,training_on):
msg = {
'type': FaceRecognitionServerProtocol.TYPE_set_training,
'val': training_on
}
msg = json.dumps(msg)
self.ws.send(msg)
def reset(self):
msg = {
'type': FaceRecognitionServerProtocol.TYPE_set_state,
'images': {},
'people': [],
'training': False
}
msg = json.dumps(msg)
self.ws.send(msg)
def setState(self, state_string):
state_json = json.loads(state_string)
state_json['type']=FaceRecognitionServerProtocol.TYPE_set_state
msg = json.dumps(state_json)
self.ws.send(msg)
def getState(self):
msg = {
'type': FaceRecognitionServerProtocol.TYPE_get_state
}
msg = json.dumps(msg)
self.ws.send(msg)
return self.recv()
def getPeople(self):
msg = {
'type': FaceRecognitionServerProtocol.TYPE_get_people
}
out = json.dumps(msg)
self.ws.send(out)
return self.recv()
# current processing frame
def addFrame(self, data_url, name):
msg = {
'type': FaceRecognitionServerProtocol.TYPE_frame,
'dataURL': data_url,
'name': name
}
msg = json.dumps(msg)
self.ws.send(msg)
return self.recv()
def removePerson(self,name):
msg = {
'type': FaceRecognitionServerProtocol.TYPE_remove_person,
'val': name
}
msg = json.dumps(msg)
self.ws.send(msg)
# resp = self.recv()
# resp = json.loads(resp)
# return resp['success']
return self.recv()
def terminate(self):
self.ws.close()
def isTraining(self):
msg = {
'type': FaceRecognitionServerProtocol.TYPE_get_training,
}
msg = json.dumps(msg)
self.ws.send(msg)
# resp = self.recv()
# resp = json.loads(resp)
# if resp['type'] != 'IS_TRAINING':
# raise ValueError
# return resp['training']
return self.recv()
class AsyncOpenFaceClientProcess(OpenFaceClient):
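    # Asynchronous, process-based variant: a child process select()s on the
    # websocket and hands every incoming message to `call_back` (together with
    # the shared queue and busy_event), so recv() here is intentionally a no-op.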
def __init__(self, server_ip=u"ws://localhost", server_port=9000,
call_back=None,
queue=None,
busy_event=None ):
super(AsyncOpenFaceClientProcess, self).__init__(server_ip, server_port)
self.call_back_fun = call_back
self.shared_queue=queue
self.receive_process = multiprocessing.Process(target=self.async_on_receive,
name='receive_thread',
args=(self.ws.sock,
call_back,
queue,
busy_event,
))
self.receive_process_running = multiprocessing.Event()
self.receive_process_running.set()
self.receive_process.start()
def async_on_receive(self,
sock,
call_back,
queue,
busy_event):
input = [sock]
while True:
inputready,outputready,exceptready = select.select(input,[],[])
for s in inputready:
if s == self.ws.sock:
try:
resp = self.ws.recv()
# self.logger.debug('server said: {}'.format(resp[:50]))
if call_back is not None:
call_back(resp, queue=queue, busy_event=busy_event)
except WebSocketException as e:
self.logger.debug("web socket error: {0}".format(e))
# add id for sequencing
def addFrameWithID(self, data_url, name, frame_id):
# self.logger.debug('before send out request openfaceClient')
msg = {
'type': FaceRecognitionServerProtocol.TYPE_frame,
'dataURL': data_url,
'name': name,
'id': frame_id
}
msg = json.dumps(msg)
self.ws.send(msg)
# sys.stdout.write('successful sent frame msg: {}'.format(msg))
# sys.stdout.flush()
# self.logger.debug('after send out request openfaceClient')
return self.recv()
def recv(self):
pass
def terminate(self):
        if self.receive_process_running is not None:
self.receive_process_running.clear()
self.ws.close()
class AsyncOpenFaceClientThread(OpenFaceClient):
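    # Asynchronous, thread-based variant: a background thread waits on the
    # websocket and dispatches each incoming message to `call_back`; recv()
    # is intentionally a no-op.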
def __init__(self, server_ip=u"ws://localhost", server_port=9000,
call_back=None):
super(AsyncOpenFaceClientThread, self).__init__(server_ip, server_port)
self.call_back_fun = call_back
self.receive_thread = threading.Thread(target=self.async_on_receive,
name='receive_thread',
args=(call_back,))
self.receive_thread_running = threading.Event()
self.receive_thread_running.set()
self.receive_thread.start()
def async_on_receive(self, call_back):
input = [self.ws.sock]
while True:
inputready,outputready,exceptready = select.select(input,[],[])
for s in inputready:
if s == self.ws.sock:
try:
resp = self.ws.recv()
if call_back:
call_back(resp)
except WebSocketException as e:
self.logger.debug("web socket error: {0}".format(e))
def recv(self):
pass
def terminate(self):
        if self.receive_thread_running is not None:
self.receive_thread_running.clear()
self.ws.close()
if __name__ == '__main__':
# client = OpenFaceClient()
client = AsyncOpenFaceClientProcess()
# pdb.set_trace()
base_dir = '/home/junjuew/gabriel/gabriel/bin/img/'
people_dir = ['Hu_Jintao', 'Jeb_Bush']
test_dir = ['test']
client.getState()
for people in people_dir:
client.addPerson(people)
client.setTraining(True)
for (idx, pdir) in enumerate(people_dir):
cur_dir = base_dir+pdir
for (dirpath, dirnames, filenames) in os.walk(cur_dir):
for filename in filenames:
print 'adding file: {}'.format(filename)
image = Image.open(dirpath + '/' +filename)
image_output = StringIO.StringIO()
image.save(image_output, 'JPEG')
jpeg_image = image_output.getvalue()
face_string = base64.b64encode(jpeg_image)
face_string = "data:image/jpeg;base64," + face_string
client.addFrame(face_string, people_dir[idx])
client.setTraining(False)
for pdir in test_dir:
cur_dir = base_dir+pdir
for (dirpath, dirnames, filenames) in os.walk(cur_dir):
for filename in filenames:
print 'testing file: {}'.format(filename)
image = Image.open(dirpath + '/' +filename)
image_output = StringIO.StringIO()
image.save(image_output, 'JPEG')
jpeg_image = image_output.getvalue()
face_string = base64.b64encode(jpeg_image)
face_string = "data:image/jpeg;base64," + face_string
client.addFrame(face_string, 'test')
state=client.getState()
print state
# print client.setState(state)
time.sleep(100)
    print 'woke up'
client.terminate()
# print "Sent"
# print "Receiving..."
# result = ws.recv()
# print "Received '%s'" % result
# ws.close()
# log.startLogging(sys.stdout)
# client = OpenFaceClient(server_ip)
# def getTSNE(self, people):
# msg = {
# 'type': 'REQ_TSNE',
# 'people': people
# }
# msg = json.dumps(msg)
# self.ws.send(msg)
# return self.recv()
# def setState(self,images, people, training_on):
# msg = {
# 'type': 'ALL_STATE',
# 'images': images,
# 'people': people,
# 'training': training_on
# }
# msg = json.dumps(msg)
# self.ws.send(msg)
# def onReceive(self, call_back):
# while (self.receive_thread_running.isSet()):
# try:
# resp = self.ws.recv()
# self.logger.debug('server said: {}'.format(resp))
# if call_back:
# call_back(resp)
# except WebSocketException as e:
# self.logger.debug("web socket error: {0}".format(e))
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Branch'
db.create_table('layerindex_branch', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('bitbake_branch', self.gf('django.db.models.fields.CharField')(max_length=50)),
('short_description', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('sort_priority', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
))
db.send_create_signal('layerindex', ['Branch'])
# Adding model 'LayerItem'
db.create_table('layerindex_layeritem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
('status', self.gf('django.db.models.fields.CharField')(default='N', max_length=1)),
('layer_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
('summary', self.gf('django.db.models.fields.CharField')(max_length=200)),
('description', self.gf('django.db.models.fields.TextField')()),
('vcs_url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('vcs_web_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('vcs_web_tree_base_url', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('vcs_web_file_base_url', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('usage_url', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('mailing_list_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
))
db.send_create_signal('layerindex', ['LayerItem'])
# Adding model 'LayerBranch'
db.create_table('layerindex_layerbranch', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('layer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['layerindex.LayerItem'])),
('branch', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['layerindex.Branch'])),
('vcs_subdir', self.gf('django.db.models.fields.CharField')(max_length=40, blank=True)),
('vcs_last_fetch', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('vcs_last_rev', self.gf('django.db.models.fields.CharField')(max_length=80, blank=True)),
('vcs_last_commit', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('layerindex', ['LayerBranch'])
# Adding model 'LayerMaintainer'
db.create_table('layerindex_layermaintainer', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('layerbranch', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['layerindex.LayerBranch'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('email', self.gf('django.db.models.fields.CharField')(max_length=255)),
('responsibility', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='A', max_length=1)),
))
db.send_create_signal('layerindex', ['LayerMaintainer'])
# Adding model 'LayerDependency'
db.create_table('layerindex_layerdependency', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('layerbranch', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dependencies_set', to=orm['layerindex.LayerBranch'])),
('dependency', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dependents_set', to=orm['layerindex.LayerItem'])),
))
db.send_create_signal('layerindex', ['LayerDependency'])
# Adding model 'LayerNote'
db.create_table('layerindex_layernote', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('layer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['layerindex.LayerItem'])),
('text', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('layerindex', ['LayerNote'])
# Adding model 'Recipe'
db.create_table('layerindex_recipe', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('layerbranch', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['layerindex.LayerBranch'])),
('filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
('filepath', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('pn', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('pv', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('summary', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('section', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('license', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('homepage', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
))
db.send_create_signal('layerindex', ['Recipe'])
# Adding model 'RecipeFileDependency'
db.create_table('layerindex_recipefiledependency', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('recipe', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['layerindex.Recipe'])),
('layerbranch', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['layerindex.LayerBranch'])),
('path', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
))
db.send_create_signal('layerindex', ['RecipeFileDependency'])
# Adding model 'Machine'
db.create_table('layerindex_machine', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('layerbranch', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['layerindex.LayerBranch'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('layerindex', ['Machine'])
def backwards(self, orm):
# Deleting model 'Branch'
db.delete_table('layerindex_branch')
# Deleting model 'LayerItem'
db.delete_table('layerindex_layeritem')
# Deleting model 'LayerBranch'
db.delete_table('layerindex_layerbranch')
# Deleting model 'LayerMaintainer'
db.delete_table('layerindex_layermaintainer')
# Deleting model 'LayerDependency'
db.delete_table('layerindex_layerdependency')
# Deleting model 'LayerNote'
db.delete_table('layerindex_layernote')
# Deleting model 'Recipe'
db.delete_table('layerindex_recipe')
# Deleting model 'RecipeFileDependency'
db.delete_table('layerindex_recipefiledependency')
# Deleting model 'Machine'
db.delete_table('layerindex_machine')
models = {
'layerindex.branch': {
'Meta': {'object_name': 'Branch'},
'bitbake_branch': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sort_priority': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'layerindex.layerbranch': {
'Meta': {'object_name': 'LayerBranch'},
'branch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.Branch']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerItem']"}),
'vcs_last_commit': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'vcs_last_fetch': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'vcs_last_rev': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'vcs_subdir': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'layerindex.layerdependency': {
'Meta': {'object_name': 'LayerDependency'},
'dependency': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependents_set'", 'to': "orm['layerindex.LayerItem']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependencies_set'", 'to': "orm['layerindex.LayerBranch']"})
},
'layerindex.layeritem': {
'Meta': {'object_name': 'LayerItem'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'mailing_list_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'usage_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'vcs_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcs_web_file_base_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'vcs_web_tree_base_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'vcs_web_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'layerindex.layermaintainer': {
'Meta': {'object_name': 'LayerMaintainer'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerBranch']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'responsibility': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '1'})
},
'layerindex.layernote': {
'Meta': {'object_name': 'LayerNote'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerItem']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
'layerindex.machine': {
'Meta': {'object_name': 'Machine'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerBranch']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'layerindex.recipe': {
'Meta': {'object_name': 'Recipe'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'filepath': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerBranch']"}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'pn': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'pv': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
'layerindex.recipefiledependency': {
'Meta': {'object_name': 'RecipeFileDependency'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['layerindex.LayerBranch']"}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.Recipe']"})
}
}
complete_apps = ['layerindex']
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import glob
import io
import os
import platform
import subprocess
import sys
from shutil import copytree, copy, rmtree
from setuptools import setup
def remove_if_exists(file_path):
if os.path.exists(file_path):
if os.path.islink(file_path) or os.path.isfile(file_path):
os.remove(file_path)
else:
assert os.path.isdir(file_path)
rmtree(file_path)
def find_file_path(pattern):
files = glob.glob(pattern)
if len(files) < 1:
print("Failed to find the file %s." % pattern)
exit(-1)
if len(files) > 1:
print("The file pattern %s is ambiguous: %s" % (pattern, files))
exit(-1)
return files[0]
in_flink_source = os.path.isfile("../../flink-java/src/main/java/org/apache/flink/api/java/"
"ExecutionEnvironment.java")
this_directory = os.path.abspath(os.path.dirname(__file__))
pyflink_directory = os.path.join(this_directory, "pyflink")
if in_flink_source:
remove_if_exists(pyflink_directory)
os.mkdir(pyflink_directory)
version_file = os.path.join(this_directory, '../pyflink/version.py')
else:
version_file = os.path.join(this_directory, 'pyflink/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PyFlink version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
TEMP_PATH = "deps"
LIB_TEMP_PATH = os.path.join(TEMP_PATH, "lib")
OPT_TEMP_PATH = os.path.join(TEMP_PATH, "opt")
CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf")
LICENSES_TEMP_PATH = os.path.join(TEMP_PATH, "licenses")
PLUGINS_TEMP_PATH = os.path.join(TEMP_PATH, "plugins")
SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin")
LICENSE_FILE_TEMP_PATH = os.path.join(this_directory, "LICENSE")
NOTICE_FILE_TEMP_PATH = os.path.join(this_directory, "NOTICE")
README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt")
VERSION_FILE_TEMP_PATH = os.path.join("pyflink", "version.py")
# Due to changes in FLINK-14008, the licenses directory and NOTICE file may not exist in
# build-target folder. Just ignore them in this case.
exist_licenses = None
try:
if in_flink_source:
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
flink_version = VERSION.replace(".dev0", "-SNAPSHOT")
FLINK_HOME = os.path.abspath(
"../../flink-dist/target/flink-%s-bin/flink-%s" % (flink_version, flink_version))
incorrect_invocation_message = """
If you are installing pyflink from flink source, you must first build Flink and
run sdist.
To build Flink with maven you can run:
mvn -DskipTests clean package
Building the source dist is done in the flink-python directory:
cd flink-python
cd apache-flink-libraries
python setup.py sdist
pip install dist/*.tar.gz"""
LIB_PATH = os.path.join(FLINK_HOME, "lib")
OPT_PATH = os.path.join(FLINK_HOME, "opt")
OPT_PYTHON_JAR_NAME = os.path.basename(
find_file_path(os.path.join(OPT_PATH, "flink-python_*.jar")))
OPT_SQL_CLIENT_JAR_NAME = os.path.basename(
find_file_path(os.path.join(OPT_PATH, "flink-sql-client*.jar")))
LICENSES_PATH = os.path.join(FLINK_HOME, "licenses")
PLUGINS_PATH = os.path.join(FLINK_HOME, "plugins")
SCRIPTS_PATH = os.path.join(FLINK_HOME, "bin")
LICENSE_FILE_PATH = os.path.join(FLINK_HOME, "LICENSE")
README_FILE_PATH = os.path.join(FLINK_HOME, "README.txt")
VERSION_FILE_PATH = os.path.join(this_directory, "../pyflink/version.py")
exist_licenses = os.path.exists(LICENSES_PATH)
if not os.path.isdir(LIB_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
try:
os.symlink(LIB_PATH, LIB_TEMP_PATH)
support_symlinks = True
except BaseException: # pylint: disable=broad-except
support_symlinks = False
os.mkdir(OPT_TEMP_PATH)
if support_symlinks:
os.symlink(os.path.join(OPT_PATH, OPT_PYTHON_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_PYTHON_JAR_NAME))
os.symlink(os.path.join(OPT_PATH, OPT_SQL_CLIENT_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_SQL_CLIENT_JAR_NAME))
os.symlink(PLUGINS_PATH, PLUGINS_TEMP_PATH)
os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH)
os.symlink(VERSION_FILE_PATH, VERSION_FILE_TEMP_PATH)
else:
copytree(LIB_PATH, LIB_TEMP_PATH)
copy(os.path.join(OPT_PATH, OPT_PYTHON_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_PYTHON_JAR_NAME))
copy(os.path.join(OPT_PATH, OPT_SQL_CLIENT_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_SQL_CLIENT_JAR_NAME))
copytree(PLUGINS_PATH, PLUGINS_TEMP_PATH)
copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
copy(README_FILE_PATH, README_FILE_TEMP_PATH)
copy(VERSION_FILE_PATH, VERSION_FILE_TEMP_PATH)
os.mkdir(SCRIPTS_TEMP_PATH)
bin_jars = glob.glob(os.path.join(SCRIPTS_PATH, "*.jar"))
for bin_jar in bin_jars:
copy(bin_jar, os.path.join(SCRIPTS_TEMP_PATH, os.path.basename(bin_jar)))
if exist_licenses and platform.system() != "Windows":
# regenerate the licenses directory and NOTICE file as we only copy part of the
# flink binary distribution.
collect_licenses_file_sh = os.path.abspath(os.path.join(
this_directory, "..", "..", "tools", "releasing", "collect_license_files.sh"))
subprocess.check_output([collect_licenses_file_sh, TEMP_PATH, TEMP_PATH])
# move the NOTICE file to the root of the package
GENERATED_NOTICE_FILE_PATH = os.path.join(TEMP_PATH, "NOTICE")
os.rename(GENERATED_NOTICE_FILE_PATH, NOTICE_FILE_TEMP_PATH)
else:
if not os.path.isdir(LIB_TEMP_PATH) or not os.path.isdir(OPT_TEMP_PATH):
print("The flink core files are not found. Please make sure your installation package "
"is complete, or do this in the flink-python directory of the flink source "
"directory.")
sys.exit(-1)
exist_licenses = os.path.exists(LICENSES_TEMP_PATH)
PACKAGES = ['pyflink',
'pyflink.bin',
'pyflink.lib',
'pyflink.opt',
'pyflink.plugins']
PACKAGE_DIR = {
'pyflink.bin': TEMP_PATH + '/bin',
'pyflink.lib': TEMP_PATH + '/lib',
'pyflink.opt': TEMP_PATH + '/opt',
'pyflink.plugins': TEMP_PATH + '/plugins'}
PACKAGE_DATA = {
'pyflink': ['README.txt', 'version.py'],
'pyflink.bin': ['*.jar'],
'pyflink.lib': ['*.jar'],
'pyflink.opt': ['*.*', '*/*'],
'pyflink.plugins': ['*', '*/*']}
if exist_licenses and platform.system() != "Windows":
PACKAGES.append('pyflink.licenses')
PACKAGE_DIR['pyflink.licenses'] = TEMP_PATH + '/licenses'
PACKAGE_DATA['pyflink.licenses'] = ['*']
setup(
name='apache-flink-libraries',
version=VERSION,
packages=PACKAGES,
include_package_data=True,
package_dir=PACKAGE_DIR,
package_data=PACKAGE_DATA,
url='https://flink.apache.org',
license='https://www.apache.org/licenses/LICENSE-2.0',
author='Apache Software Foundation',
author_email='[email protected]',
python_requires='>=3.6',
description='Apache Flink Libraries',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'],
)
finally:
if in_flink_source:
remove_if_exists(TEMP_PATH)
remove_if_exists(LICENSE_FILE_TEMP_PATH)
remove_if_exists(NOTICE_FILE_TEMP_PATH)
remove_if_exists(README_FILE_TEMP_PATH)
remove_if_exists(VERSION_FILE_TEMP_PATH)
remove_if_exists(pyflink_directory)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine import support
from heat.scaling import cooldown
from heat.scaling import scalingutil as sc_util
LOG = logging.getLogger(__name__)
class AutoScalingPolicy(signal_responder.SignalResponder,
cooldown.CooldownMixin):
"""A resource to manage scaling of `OS::Heat::AutoScalingGroup`.
**Note** while it may incidentally support
`AWS::AutoScaling::AutoScalingGroup` for now, please don't use it for that
purpose and use `AWS::AutoScaling::ScalingPolicy` instead.
    Resource to manage scaling for `OS::Heat::AutoScalingGroup`, i.e. define
    the scaling adjustment and adjustment type, set the cooldown period, etc.
"""
PROPERTIES = (
AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
COOLDOWN, MIN_ADJUSTMENT_STEP
) = (
'auto_scaling_group_id', 'scaling_adjustment', 'adjustment_type',
'cooldown', 'min_adjustment_step',
)
ATTRIBUTES = (
ALARM_URL, SIGNAL_URL
) = (
'alarm_url', 'signal_url'
)
properties_schema = {
# TODO(Qiming): property name should be AUTO_SCALING_GROUP_ID
AUTO_SCALING_GROUP_NAME: properties.Schema(
properties.Schema.STRING,
_('AutoScaling group ID to apply policy to.'),
required=True
),
SCALING_ADJUSTMENT: properties.Schema(
properties.Schema.NUMBER,
_('Size of adjustment.'),
required=True,
update_allowed=True
),
ADJUSTMENT_TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of adjustment (absolute or percentage).'),
required=True,
constraints=[
constraints.AllowedValues(
[sc_util.CHANGE_IN_CAPACITY,
sc_util.EXACT_CAPACITY,
sc_util.PERCENT_CHANGE_IN_CAPACITY]),
],
update_allowed=True
),
COOLDOWN: properties.Schema(
properties.Schema.NUMBER,
_('Cooldown period, in seconds.'),
update_allowed=True
),
MIN_ADJUSTMENT_STEP: properties.Schema(
properties.Schema.INTEGER,
_('Minimum number of resources that are added or removed '
'when the AutoScaling group scales up or down. This can '
'be used only when specifying percent_change_in_capacity '
'for the adjustment_type property.'),
constraints=[
constraints.Range(
min=0,
),
],
update_allowed=True
),
}
attributes_schema = {
ALARM_URL: attributes.Schema(
_("A signed url to handle the alarm."),
type=attributes.Schema.STRING
),
SIGNAL_URL: attributes.Schema(
_("A url to handle the alarm using native API."),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
}
def validate(self):
"""Add validation for min_adjustment_step."""
super(AutoScalingPolicy, self).validate()
self._validate_min_adjustment_step()
def _validate_min_adjustment_step(self):
adjustment_type = self.properties.get(self.ADJUSTMENT_TYPE)
adjustment_step = self.properties.get(self.MIN_ADJUSTMENT_STEP)
if (adjustment_type != sc_util.PERCENT_CHANGE_IN_CAPACITY
and adjustment_step is not None):
raise exception.ResourcePropertyValueDependency(
prop1=self.MIN_ADJUSTMENT_STEP,
prop2=self.ADJUSTMENT_TYPE,
value=sc_util.PERCENT_CHANGE_IN_CAPACITY)
def handle_create(self):
super(AutoScalingPolicy, self).handle_create()
self.resource_id_set(self._get_user_id())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Updates self.properties, if Properties has changed.
If Properties has changed, update self.properties, so we get the new
values during any subsequent adjustment.
"""
if prop_diff:
self.properties = json_snippet.properties(self.properties_schema,
self.context)
def handle_signal(self, details=None):
# ceilometer sends details like this:
# {u'alarm_id': ID, u'previous': u'ok', u'current': u'alarm',
# u'reason': u'...'})
# in this policy we currently assume that this gets called
# only when there is an alarm. But the template writer can
# put the policy in all the alarm notifiers (nodata, and ok).
#
# our watchrule has upper case states so lower() them all.
if details is None:
alarm_state = 'alarm'
else:
alarm_state = details.get('current',
details.get('state', 'alarm')).lower()
LOG.info(_LI('Alarm %(name)s, new state %(state)s'),
{'name': self.name, 'state': alarm_state})
if alarm_state != 'alarm':
raise exception.NoActionRequired()
if self._cooldown_inprogress():
LOG.info(_LI("%(name)s NOT performing scaling action, "
"cooldown %(cooldown)s"),
{'name': self.name,
'cooldown': self.properties[self.COOLDOWN]})
raise exception.NoActionRequired()
asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
group = self.stack.resource_by_refid(asgn_id)
try:
if group is None:
raise exception.NotFound(_('Alarm %(alarm)s could not find '
'scaling group named "%(group)s"'
) % {'alarm': self.name,
'group': asgn_id})
LOG.info(_LI('%(name)s Alarm, adjusting Group %(group)s with id '
'%(asgn_id)s by %(filter)s'),
{'name': self.name, 'group': group.name,
'asgn_id': asgn_id,
'filter': self.properties[self.SCALING_ADJUSTMENT]})
group.adjust(self.properties[self.SCALING_ADJUSTMENT],
self.properties[self.ADJUSTMENT_TYPE],
self.properties[self.MIN_ADJUSTMENT_STEP],
signal=True)
finally:
self._cooldown_timestamp("%s : %s" % (
self.properties[self.ADJUSTMENT_TYPE],
self.properties[self.SCALING_ADJUSTMENT]))
def _resolve_attribute(self, name):
if self.resource_id is None:
return
if name == self.ALARM_URL:
return six.text_type(self._get_ec2_signed_url())
elif name == self.SIGNAL_URL:
return six.text_type(self._get_heat_signal_url())
def get_reference_id(self):
return resource.Resource.get_reference_id(self)
def resource_mapping():
return {
'OS::Heat::ScalingPolicy': AutoScalingPolicy,
}
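# A minimal usage sketch (editor's addition, not from the original module): the
# keys below mirror the PROPERTIES tuple defined on AutoScalingPolicy; the
# values are hypothetical examples only.
#
#   example_policy_properties = {
#       'auto_scaling_group_id': 'my-asg-id',      # hypothetical group reference
#       'adjustment_type': 'change_in_capacity',   # one of the allowed values above
#       'scaling_adjustment': 1,
#       'cooldown': 60,
#   }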
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 20222 if testnet else 10222
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
        actual_fee = total_in - total_out
        if actual_fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(actual_fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
"""Logging utilities for Certbot.
The best way to use this module is through `pre_arg_parse_setup` and
`post_arg_parse_setup`. `pre_arg_parse_setup` configures a minimal
terminal logger and ensures a detailed log is written to a secure
temporary file if Certbot exits before `post_arg_parse_setup` is called.
`post_arg_parse_setup` relies on the parsed command line arguments and
does the full logging setup with terminal and rotating file handling as
configured by the user. Any logged messages before
`post_arg_parse_setup` is called are sent to the rotating file handler.
Special care is taken by both methods to ensure all errors are logged
and properly flushed before program exit.
The `logging` module is useful for recording messages about what Certbot
is doing under the hood that do not necessarily need to be shown to the
user on the terminal. The default verbosity is WARNING.
The preferred method to display important information to the user is to
use `certbot.display.util` and `certbot.display.ops`.
"""
import functools
import logging
import logging.handlers
import shutil
import sys
import tempfile
import traceback
from types import TracebackType
from typing import IO
from acme import messages
from certbot import errors
from certbot import util
from certbot._internal import constants
from certbot.compat import os
# Logging format
CLI_FMT = "%(message)s"
FILE_FMT = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
logger = logging.getLogger(__name__)
def pre_arg_parse_setup():
"""Setup logging before command line arguments are parsed.
    Terminal logging is set up using
    `certbot._internal.constants.QUIET_LOGGING_LEVEL` so Certbot is as quiet as
    possible. File logging is set up so that logging messages are
buffered in memory. If Certbot exits before `post_arg_parse_setup`
is called, these buffered messages are written to a temporary file.
If Certbot doesn't exit, `post_arg_parse_setup` writes the messages
to the normal log files.
This function also sets `logging.shutdown` to be called on program
exit which automatically flushes logging handlers and
`sys.excepthook` to properly log/display fatal exceptions.
"""
temp_handler = TempHandler()
temp_handler.setFormatter(logging.Formatter(FILE_FMT))
temp_handler.setLevel(logging.DEBUG)
memory_handler = MemoryHandler(temp_handler)
stream_handler = ColoredStreamHandler()
stream_handler.setFormatter(logging.Formatter(CLI_FMT))
stream_handler.setLevel(constants.QUIET_LOGGING_LEVEL)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG) # send all records to handlers
root_logger.addHandler(memory_handler)
root_logger.addHandler(stream_handler)
# logging.shutdown will flush the memory handler because flush() and
# close() are explicitly called
util.atexit_register(logging.shutdown)
sys.excepthook = functools.partial(
pre_arg_parse_except_hook, memory_handler,
debug='--debug' in sys.argv,
quiet='--quiet' in sys.argv or '-q' in sys.argv,
log_path=temp_handler.path)
def post_arg_parse_setup(config):
"""Setup logging after command line arguments are parsed.
This function assumes `pre_arg_parse_setup` was called earlier and
the root logging configuration has not been modified. A rotating
file logging handler is created and the buffered log messages are
sent to that handler. Terminal logging output is set to the level
requested by the user.
:param certbot.configuration.NamespaceConfig config: Configuration object
"""
file_handler, file_path = setup_log_file_handler(
config, 'letsencrypt.log', FILE_FMT)
root_logger = logging.getLogger()
memory_handler = stderr_handler = None
for handler in root_logger.handlers:
if isinstance(handler, ColoredStreamHandler):
stderr_handler = handler
elif isinstance(handler, MemoryHandler):
memory_handler = handler
msg = 'Previously configured logging handlers have been removed!'
assert memory_handler is not None and stderr_handler is not None, msg
root_logger.addHandler(file_handler)
root_logger.removeHandler(memory_handler)
temp_handler = getattr(memory_handler, 'target', None)
memory_handler.setTarget(file_handler) # pylint: disable=no-member
memory_handler.flush(force=True) # pylint: disable=unexpected-keyword-arg
memory_handler.close()
if temp_handler:
temp_handler.close()
if config.quiet:
level = constants.QUIET_LOGGING_LEVEL
elif config.verbose_level is not None:
level = constants.DEFAULT_LOGGING_LEVEL - int(config.verbose_level) * 10
else:
level = constants.DEFAULT_LOGGING_LEVEL - config.verbose_count * 10
stderr_handler.setLevel(level)
logger.debug('Root logging level set at %d', level)
if not config.quiet:
print(f'Saving debug log to {file_path}', file=sys.stderr)
sys.excepthook = functools.partial(
post_arg_parse_except_hook,
debug=config.debug, quiet=config.quiet, log_path=file_path)
def setup_log_file_handler(config, logfile, fmt):
"""Setup file debug logging.
:param certbot.configuration.NamespaceConfig config: Configuration object
:param str logfile: basename for the log file
:param str fmt: logging format string
:returns: file handler and absolute path to the log file
:rtype: tuple
"""
# TODO: logs might contain sensitive data such as contents of the
# private key! #525
util.set_up_core_dir(config.logs_dir, 0o700, config.strict_permissions)
log_file_path = os.path.join(config.logs_dir, logfile)
try:
handler = logging.handlers.RotatingFileHandler(
log_file_path, maxBytes=2 ** 20,
backupCount=config.max_log_backups)
except IOError as error:
raise errors.Error(util.PERM_ERR_FMT.format(error))
# rotate on each invocation, rollover only possible when maxBytes
# is nonzero and backupCount is nonzero, so we set maxBytes as big
# as possible not to overrun in single CLI invocation (1MB).
handler.doRollover() # TODO: creates empty letsencrypt.log.1 file
handler.setLevel(logging.DEBUG)
handler_formatter = logging.Formatter(fmt=fmt)
handler.setFormatter(handler_formatter)
return handler, log_file_path
class ColoredStreamHandler(logging.StreamHandler):
"""Sends colored logging output to a stream.
If the specified stream is not a tty, the class works like the
standard `logging.StreamHandler`. Default red_level is
`logging.WARNING`.
:ivar bool colored: True if output should be colored
    :ivar int red_level: The level at and above which output is colored red
"""
def __init__(self, stream=None):
super().__init__(stream)
self.colored = (sys.stderr.isatty() if stream is None else
stream.isatty())
self.red_level = logging.WARNING
def format(self, record):
"""Formats the string representation of record.
:param logging.LogRecord record: Record to be formatted
:returns: Formatted, string representation of record
:rtype: str
"""
out = super().format(record)
if self.colored and record.levelno >= self.red_level:
return ''.join((util.ANSI_SGR_RED, out, util.ANSI_SGR_RESET))
return out
class MemoryHandler(logging.handlers.MemoryHandler):
"""Buffers logging messages in memory until the buffer is flushed.
This differs from `logging.handlers.MemoryHandler` in that flushing
only happens when flush(force=True) is called.
"""
def __init__(self, target=None, capacity=10000):
# capacity doesn't matter because should_flush() is overridden
super().__init__(capacity, target=target)
def close(self):
"""Close the memory handler, but don't set the target to None."""
# This allows the logging module which may only have a weak
# reference to the target handler to properly flush and close it.
target = getattr(self, 'target')
super().close()
self.target = target
def flush(self, force=False): # pylint: disable=arguments-differ
"""Flush the buffer if force=True.
If force=False, this call is a noop.
:param bool force: True if the buffer should be flushed.
"""
# This method allows flush() calls in logging.shutdown to be a
# noop so we can control when this handler is flushed.
if force:
super().flush()
def shouldFlush(self, record):
"""Should the buffer be automatically flushed?
:param logging.LogRecord record: log record to be considered
:returns: False because the buffer should never be auto-flushed
:rtype: bool
"""
return False
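    # A minimal life-cycle sketch (editor's addition), mirroring how
    # pre_arg_parse_setup/post_arg_parse_setup above use this handler;
    # `file_handler` is a hypothetical final target:
    #
    #   mh = MemoryHandler(TempHandler())
    #   logging.getLogger().addHandler(mh)
    #   ...                          # records accumulate; flush() alone is a no-op
    #   mh.setTarget(file_handler)   # retarget to the real handler
    #   mh.flush(force=True)         # now the buffered records are emitted
    #   mh.close()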
class TempHandler(logging.StreamHandler):
"""Safely logs messages to a temporary file.
The file is created with permissions 600. If no log records are sent
to this handler, the temporary file is deleted when the handler is
closed.
:ivar str path: file system path to the temporary log file
"""
def __init__(self):
self._workdir = tempfile.mkdtemp()
self.path = os.path.join(self._workdir, 'log')
stream = util.safe_open(self.path, mode='w', chmod=0o600)
super().__init__(stream)
# Super constructor assigns the provided stream object to self.stream.
# Let's help mypy be aware of this by giving a type hint.
self.stream: IO[str]
self._delete = True
def emit(self, record):
"""Log the specified logging record.
:param logging.LogRecord record: Record to be formatted
"""
self._delete = False
super().emit(record)
def close(self):
"""Close the handler and the temporary log file.
The temporary log file is deleted if it wasn't used.
"""
self.acquire()
try:
# StreamHandler.close() doesn't close the stream to allow a
# stream like stderr to be used
self.stream.close()
if self._delete:
shutil.rmtree(self._workdir)
self._delete = False
super().close()
finally:
self.release()
def pre_arg_parse_except_hook(memory_handler, *args, **kwargs):
"""A simple wrapper around post_arg_parse_except_hook.
The additional functionality provided by this wrapper is the memory
handler will be flushed before Certbot exits. This allows us to
write logging messages to a temporary file if we crashed before
logging was fully configured.
Since sys.excepthook isn't called on SystemExit exceptions, the
memory handler will not be flushed in this case which prevents us
from creating temporary log files when argparse exits because a
command line argument was invalid or -h, --help, or --version was
provided on the command line.
:param MemoryHandler memory_handler: memory handler to flush
:param tuple args: args for post_arg_parse_except_hook
:param dict kwargs: kwargs for post_arg_parse_except_hook
"""
try:
post_arg_parse_except_hook(*args, **kwargs)
finally:
# flush() is called here so messages logged during
# post_arg_parse_except_hook are also flushed.
memory_handler.flush(force=True)
def post_arg_parse_except_hook(exc_type: type, exc_value: BaseException, trace: TracebackType,
debug: bool, quiet: bool, log_path: str):
"""Logs fatal exceptions and reports them to the user.
If debug is True, the full exception and traceback is shown to the
user, otherwise, it is suppressed. sys.exit is always called with a
nonzero status.
:param type exc_type: type of the raised exception
:param BaseException exc_value: raised exception
:param traceback trace: traceback of where the exception was raised
:param bool debug: True if the traceback should be shown to the user
:param bool quiet: True if Certbot is running in quiet mode
:param str log_path: path to file or directory containing the log
"""
exc_info = (exc_type, exc_value, trace)
# Only print human advice if not running under --quiet
exit_func = lambda: sys.exit(1) if quiet else exit_with_advice(log_path)
# constants.QUIET_LOGGING_LEVEL or higher should be used to
    # display messages to the user; otherwise, a lower level like
    # logging.DEBUG should be used
if debug or not issubclass(exc_type, Exception):
assert constants.QUIET_LOGGING_LEVEL <= logging.ERROR
if exc_type is KeyboardInterrupt:
logger.error('Exiting due to user request.')
sys.exit(1)
logger.error('Exiting abnormally:', exc_info=exc_info)
else:
logger.debug('Exiting abnormally:', exc_info=exc_info)
# Use logger to print the error message to take advantage of
# our logger printing warnings and errors in red text.
if issubclass(exc_type, errors.Error):
logger.error(str(exc_value))
exit_func()
logger.error('An unexpected error occurred:')
if messages.is_acme_error(exc_value):
# Remove the ACME error prefix from the exception
_, _, exc_str = str(exc_value).partition(':: ')
logger.error(exc_str)
else:
output = traceback.format_exception_only(exc_type, exc_value)
# format_exception_only returns a list of strings each
# terminated by a newline. We combine them into one string
# and remove the final newline before passing it to
# logger.error.
logger.error(''.join(output).rstrip())
exit_func()
def exit_with_advice(log_path: str):
"""Print a link to the community forums, the debug log path, and exit
The message is printed to stderr and the program will exit with a
nonzero status.
:param str log_path: path to file or directory containing the log
"""
msg = ("Ask for help or search for solutions at https://community.letsencrypt.org. "
"See the ")
if os.path.isdir(log_path):
msg += f'logfiles in {log_path} '
else:
msg += f"logfile {log_path} "
msg += 'or re-run Certbot with -v for more details.'
sys.exit(msg)
|
|
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import mock
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute import security_groups
from nova import compute
from nova import context
import nova.db
from nova import exception
from nova.network import model
from nova.network.neutronv2 import api as neutron_api
from nova.network.security_group import neutron_driver
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack.compute import test_security_groups
from nova.tests.unit.api.openstack import fakes
from nova.tests import uuidsentinel as uuids
UUID_SERVER = uuids.server
class TestNeutronSecurityGroupsTestCase(test.TestCase):
def setUp(self):
super(TestNeutronSecurityGroupsTestCase, self).setUp()
cfg.CONF.set_override('use_neutron', True)
self.original_client = neutron_api.get_client
neutron_api.get_client = get_client
def tearDown(self):
neutron_api.get_client = self.original_client
get_client()._reset()
super(TestNeutronSecurityGroupsTestCase, self).tearDown()
class TestNeutronSecurityGroupsV21(
test_security_groups.TestSecurityGroupsV21,
TestNeutronSecurityGroupsTestCase):
# Used to override the config set in the base test in test_security_groups.
use_neutron = True
def _create_sg_template(self, **kwargs):
sg = test_security_groups.security_group_request_template(**kwargs)
return self.controller.create(self.req, body={'security_group': sg})
def _create_network(self):
body = {'network': {'name': 'net1'}}
neutron = get_client()
net = neutron.create_network(body)
body = {'subnet': {'network_id': net['network']['id'],
'cidr': '10.0.0.0/24'}}
neutron.create_subnet(body)
return net
def _create_port(self, **kwargs):
body = {'port': {'binding:vnic_type': model.VNIC_TYPE_NORMAL}}
fields = ['security_groups', 'device_id', 'network_id',
'port_security_enabled', 'ip_allocation']
for field in fields:
if field in kwargs:
body['port'][field] = kwargs[field]
neutron = get_client()
return neutron.create_port(body)
def _create_security_group(self, **kwargs):
body = {'security_group': {}}
fields = ['name', 'description']
for field in fields:
if field in kwargs:
body['security_group'][field] = kwargs[field]
neutron = get_client()
return neutron.create_security_group(body)
def test_create_security_group_with_no_description(self):
# Neutron's security group description field is optional.
pass
def test_create_security_group_with_empty_description(self):
# Neutron's security group description field is optional.
pass
def test_create_security_group_with_blank_name(self):
# Neutron's security group name field is optional.
pass
def test_create_security_group_with_whitespace_name(self):
# Neutron allows security group name to be whitespace.
pass
def test_create_security_group_with_blank_description(self):
# Neutron's security group description field is optional.
pass
def test_create_security_group_with_whitespace_description(self):
# Neutron allows description to be whitespace.
pass
def test_create_security_group_with_duplicate_name(self):
# Neutron allows duplicate names for security groups.
pass
def test_create_security_group_non_string_name(self):
# Neutron allows security group name to be non string.
pass
def test_create_security_group_non_string_description(self):
# Neutron allows non string description.
pass
def test_create_security_group_quota_limit(self):
# Enforced by Neutron server.
pass
def test_update_security_group(self):
# Enforced by Neutron server.
pass
def test_get_security_group_list(self):
self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
list_dict = self.controller.index(req)
self.assertEqual(len(list_dict['security_groups']), 2)
def test_get_security_group_list_all_tenants(self):
pass
def test_get_security_group_by_instance(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=test_security_groups.FAKE_UUID1)
expected = [{'rules': [], 'tenant_id': 'fake', 'id': sg['id'],
'name': 'test', 'description': 'test-description'}]
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server_by_uuid)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/os-security-groups'
% test_security_groups.FAKE_UUID1)
res_dict = self.server_controller.index(
req, test_security_groups.FAKE_UUID1)['security_groups']
self.assertEqual(expected, res_dict)
def test_get_security_group_by_id(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% sg['id'])
res_dict = self.controller.show(req, sg['id'])
expected = {'security_group': sg}
self.assertEqual(res_dict, expected)
def test_delete_security_group_by_id(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
sg['id'])
self.controller.delete(req, sg['id'])
def test_delete_security_group_by_admin(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
sg['id'], use_admin_context=True)
self.controller.delete(req, sg['id'])
@mock.patch('nova.compute.utils.refresh_info_cache_for_instance')
def test_delete_security_group_in_use(self, refresh_info_cache_mock):
sg = self._create_sg_template().get('security_group')
self._create_network()
db_inst = fakes.stub_instance(id=1, nw_cache=[], security_groups=[])
_context = context.get_admin_context()
instance = instance_obj.Instance._from_db_object(
_context, instance_obj.Instance(), db_inst,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
neutron = neutron_api.API()
with mock.patch.object(nova.db, 'instance_get_by_uuid',
return_value=db_inst):
neutron.allocate_for_instance(_context, instance, False, None,
security_groups=[sg['id']])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% sg['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, sg['id'])
def test_associate_non_running_instance(self):
# Neutron does not care if the instance is running or not. When the
# instance is detected by neutron it will push down the security
# group policy to it.
pass
def test_associate_already_associated_security_group_to_instance(self):
# Neutron security groups do not raise an error if you update a
# port, adding a security group that was already associated
# with the port. This is because PUT semantics are used.
pass
def test_associate(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.manager._addSecurityGroup(req, UUID_SERVER, body)
def test_associate_duplicate_names(self):
sg1 = self._create_security_group(name='sg1',
description='sg1')['security_group']
self._create_security_group(name='sg1',
description='sg1')['security_group']
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id']],
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="sg1"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.assertRaises(webob.exc.HTTPConflict,
self.manager._addSecurityGroup,
req, UUID_SERVER, body)
def test_associate_port_security_enabled_true(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
port_security_enabled=True,
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.manager._addSecurityGroup(req, UUID_SERVER, body)
def test_associate_port_security_enabled_false(self):
self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], port_security_enabled=False,
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup,
req, UUID_SERVER, body)
def test_associate_deferred_ip_port(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
port_security_enabled=True, ip_allocation='deferred',
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.manager._addSecurityGroup(req, UUID_SERVER, body)
def test_disassociate_by_non_existing_security_group_name(self):
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup,
req, UUID_SERVER, body)
def test_disassociate_non_running_instance(self):
# Neutron does not care if the instance is running or not. When the
# instance is detected by neutron it will push down the security
# group policy to it.
pass
def test_disassociate_already_associated_security_group_to_instance(self):
# Neutron security groups do not raise an error if you update a
# port, adding a security group that was already associated
# with the port. This is because PUT semantics are used.
pass
def test_disassociate(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.manager._removeSecurityGroup(req, UUID_SERVER, body)
def test_get_instances_security_groups_bindings(self):
servers = [{'id': test_security_groups.FAKE_UUID1},
{'id': test_security_groups.FAKE_UUID2}]
sg1 = self._create_sg_template(name='test1').get('security_group')
sg2 = self._create_sg_template(name='test2').get('security_group')
# test name='' is replaced with id
sg3 = self._create_sg_template(name='').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id'],
sg2['id']],
device_id=test_security_groups.FAKE_UUID1)
self._create_port(
network_id=net['network']['id'], security_groups=[sg2['id'],
sg3['id']],
device_id=test_security_groups.FAKE_UUID2)
expected = {test_security_groups.FAKE_UUID1: [{'name': sg1['name']},
{'name': sg2['name']}],
test_security_groups.FAKE_UUID2: [{'name': sg2['name']},
{'name': sg3['id']}]}
security_group_api = self.controller.security_group_api
bindings = (
security_group_api.get_instances_security_groups_bindings(
context.get_admin_context(), servers))
self.assertEqual(bindings, expected)
def test_get_instance_security_groups(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
sg2 = self._create_sg_template(name='test2').get('security_group')
# test name='' is replaced with id
sg3 = self._create_sg_template(name='').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id'],
sg2['id'],
sg3['id']],
device_id=test_security_groups.FAKE_UUID1)
expected = [{'name': sg1['name']}, {'name': sg2['name']},
{'name': sg3['id']}]
security_group_api = self.controller.security_group_api
sgs = security_group_api.get_instance_security_groups(
context.get_admin_context(),
instance_obj.Instance(uuid=test_security_groups.FAKE_UUID1))
self.assertEqual(sgs, expected)
@mock.patch('nova.network.security_group.neutron_driver.SecurityGroupAPI.'
'get_instances_security_groups_bindings')
def test_get_security_group_empty_for_instance(self, neutron_sg_bind_mock):
servers = [{'id': test_security_groups.FAKE_UUID1}]
neutron_sg_bind_mock.return_value = {}
security_group_api = self.controller.security_group_api
ctx = context.get_admin_context()
sgs = security_group_api.get_instance_security_groups(ctx,
instance_obj.Instance(uuid=test_security_groups.FAKE_UUID1))
neutron_sg_bind_mock.assert_called_once_with(ctx, servers, False)
self.assertEqual([], sgs)
def test_create_port_with_sg_and_port_security_enabled_true(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id']],
port_security_enabled=True,
device_id=test_security_groups.FAKE_UUID1)
security_group_api = self.controller.security_group_api
sgs = security_group_api.get_instance_security_groups(
context.get_admin_context(),
instance_obj.Instance(uuid=test_security_groups.FAKE_UUID1))
self.assertEqual(sgs, [{'name': 'test1'}])
def test_create_port_with_sg_and_port_security_enabled_false(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
net = self._create_network()
self.assertRaises(exception.SecurityGroupCannotBeApplied,
self._create_port,
network_id=net['network']['id'],
security_groups=[sg1['id']],
port_security_enabled=False,
device_id=test_security_groups.FAKE_UUID1)
class TestNeutronSecurityGroupRulesTestCase(TestNeutronSecurityGroupsTestCase):
def setUp(self):
super(TestNeutronSecurityGroupRulesTestCase, self).setUp()
id1 = '11111111-1111-1111-1111-111111111111'
sg_template1 = test_security_groups.security_group_template(
security_group_rules=[], id=id1)
id2 = '22222222-2222-2222-2222-222222222222'
sg_template2 = test_security_groups.security_group_template(
security_group_rules=[], id=id2)
self.controller_sg = security_groups.SecurityGroupController()
neutron = get_client()
neutron._fake_security_groups[id1] = sg_template1
neutron._fake_security_groups[id2] = sg_template2
def tearDown(self):
neutron_api.get_client = self.original_client
get_client()._reset()
super(TestNeutronSecurityGroupsTestCase, self).tearDown()
class _TestNeutronSecurityGroupRulesBase(object):
def test_create_add_existing_rules_by_cidr(self):
sg = test_security_groups.security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller_sg.create(req, {'security_group': sg})
rule = test_security_groups.security_group_rule_template(
cidr='15.0.0.0/8', parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.controller.create(req, {'security_group_rule': rule})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
sg = test_security_groups.security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller_sg.create(req, {'security_group': sg})
rule = test_security_groups.security_group_rule_template(
group=self.sg1['id'], parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.controller.create(req, {'security_group_rule': rule})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_delete(self):
rule = test_security_groups.security_group_rule_template(
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% security_group_rule['id'])
self.controller.delete(req, security_group_rule['id'])
def test_create_rule_quota_limit(self):
# Enforced by neutron
pass
class TestNeutronSecurityGroupRulesV21(
_TestNeutronSecurityGroupRulesBase,
test_security_groups.TestSecurityGroupRulesV21,
TestNeutronSecurityGroupRulesTestCase):
# Used to override the config set in the base test in test_security_groups.
use_neutron = True
class TestNeutronSecurityGroupsOutputTest(TestNeutronSecurityGroupsTestCase):
content_type = 'application/json'
def setUp(self):
super(TestNeutronSecurityGroupsOutputTest, self).setUp()
fakes.stub_out_nw_api(self)
self.controller = security_groups.SecurityGroupController()
self.stubs.Set(compute.api.API, 'get',
test_security_groups.fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all',
test_security_groups.fake_compute_get_all)
self.stubs.Set(compute.api.API, 'create',
test_security_groups.fake_compute_create)
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instances_security_groups_bindings',
(test_security_groups.
fake_get_instances_security_groups_bindings))
def _make_request(self, url, body=None):
req = fakes.HTTPRequest.blank(url)
if body:
req.method = 'POST'
req.body = encodeutils.safe_encode(self._encode_body(body))
req.content_type = self.content_type
req.headers['Accept'] = self.content_type
# NOTE: This 'os-security-groups' is for enabling security_groups
# attribute on response body.
res = req.get_response(fakes.wsgi_app_v21())
return res
def _encode_body(self, body):
return jsonutils.dumps(body)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def _get_groups(self, server):
return server.get('security_groups')
def test_create(self):
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
for security_group in security_groups:
sg = test_security_groups.security_group_template(
name=security_group['name'])
self.controller.create(req, {'security_group': sg})
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
security_groups=security_groups)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_create_server_get_default_security_group(self):
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
group = self._get_groups(server)[0]
self.assertEqual(group.get('name'), 'default')
def test_show(self):
def fake_get_instance_security_groups(inst, context, id):
return [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instance_security_groups',
fake_get_instance_security_groups)
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
for security_group in security_groups:
sg = test_security_groups.security_group_template(
name=security_group['name'])
self.controller.create(req, {'security_group': sg})
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
security_groups=security_groups)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
# Test that show (GET) returns the same information as create (POST)
url = '/v2/fake/servers/' + test_security_groups.UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_detail(self):
url = '/v2/fake/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
for j, group in enumerate(self._get_groups(server)):
name = 'fake-%s-%s' % (i, j)
self.assertEqual(group.get('name'), name)
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
def get_client(context=None, admin=False):
return MockClient()
class MockClient(object):
# Needs to be global to survive multiple calls to get_client.
_fake_security_groups = {}
_fake_ports = {}
_fake_networks = {}
_fake_subnets = {}
_fake_security_group_rules = {}
def __init__(self):
# add default security group
if not len(self._fake_security_groups):
ret = {'name': 'default', 'description': 'default',
'tenant_id': 'fake_tenant', 'security_group_rules': [],
'id': uuidutils.generate_uuid()}
self._fake_security_groups[ret['id']] = ret
def _reset(self):
self._fake_security_groups.clear()
self._fake_ports.clear()
self._fake_networks.clear()
self._fake_subnets.clear()
self._fake_security_group_rules.clear()
def create_security_group(self, body=None):
s = body.get('security_group')
if not isinstance(s.get('name', ''), six.string_types):
msg = ('BadRequest: Invalid input for name. Reason: '
'None is not a valid string.')
raise n_exc.BadRequest(message=msg)
if not isinstance(s.get('description', ''), six.string_types):
msg = ('BadRequest: Invalid input for description. Reason: '
'None is not a valid string.')
raise n_exc.BadRequest(message=msg)
if len(s.get('name')) > 255 or len(s.get('description')) > 255:
msg = 'Security Group name or description greater than 255 characters'
raise n_exc.NeutronClientException(message=msg, status_code=401)
ret = {'name': s.get('name'), 'description': s.get('description'),
'tenant_id': 'fake', 'security_group_rules': [],
'id': uuidutils.generate_uuid()}
self._fake_security_groups[ret['id']] = ret
return {'security_group': ret}
def create_network(self, body):
n = body.get('network')
ret = {'status': 'ACTIVE', 'subnets': [], 'name': n.get('name'),
'admin_state_up': n.get('admin_state_up', True),
'tenant_id': 'fake_tenant',
'id': uuidutils.generate_uuid()}
if 'port_security_enabled' in n:
ret['port_security_enabled'] = n['port_security_enabled']
self._fake_networks[ret['id']] = ret
return {'network': ret}
def create_subnet(self, body):
s = body.get('subnet')
try:
net = self._fake_networks[s.get('network_id')]
except KeyError:
msg = 'Network %s not found' % s.get('network_id')
raise n_exc.NeutronClientException(message=msg, status_code=404)
ret = {'name': s.get('name'), 'network_id': s.get('network_id'),
'tenant_id': 'fake_tenant', 'cidr': s.get('cidr'),
'id': uuidutils.generate_uuid(), 'gateway_ip': '10.0.0.1'}
net['subnets'].append(ret['id'])
self._fake_networks[net['id']] = net
self._fake_subnets[ret['id']] = ret
return {'subnet': ret}
def create_port(self, body):
p = body.get('port')
ret = {'status': 'ACTIVE', 'id': uuidutils.generate_uuid(),
'mac_address': p.get('mac_address', 'fa:16:3e:b8:f5:fb'),
'device_id': p.get('device_id', uuidutils.generate_uuid()),
'admin_state_up': p.get('admin_state_up', True),
'security_groups': p.get('security_groups', []),
'network_id': p.get('network_id'),
'ip_allocation': p.get('ip_allocation'),
'binding:vnic_type':
p.get('binding:vnic_type') or model.VNIC_TYPE_NORMAL}
network = self._fake_networks[p['network_id']]
if 'port_security_enabled' in p:
ret['port_security_enabled'] = p['port_security_enabled']
elif 'port_security_enabled' in network:
ret['port_security_enabled'] = network['port_security_enabled']
port_security = ret.get('port_security_enabled', True)
# port_security must be True if security groups are present
if not port_security and ret['security_groups']:
raise exception.SecurityGroupCannotBeApplied()
if network['subnets'] and p.get('ip_allocation') != 'deferred':
ret['fixed_ips'] = [{'subnet_id': network['subnets'][0],
'ip_address': '10.0.0.1'}]
if not ret['security_groups'] and (port_security is None or
port_security is True):
for security_group in self._fake_security_groups.values():
if security_group['name'] == 'default':
ret['security_groups'] = [security_group['id']]
break
self._fake_ports[ret['id']] = ret
return {'port': ret}
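# Brief illustration of the port-security behaviour modelled above (example
# values only, not part of the real client): a port created with
# port_security_enabled=False and a non-empty security group list raises
# SecurityGroupCannotBeApplied, while a port created with no security groups
# and port security left enabled picks up the 'default' group automatically.
#
#     client = MockClient()
#     net = client.create_network({'network': {'name': 'net1'}})['network']
#     port = client.create_port({'port': {'network_id': net['id']}})['port']
#     # port['security_groups'] now contains the id of the 'default' group.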
def create_security_group_rule(self, body):
# does not handle bulk case so just picks rule[0]
r = body.get('security_group_rules')[0]
fields = ['direction', 'protocol', 'port_range_min', 'port_range_max',
'ethertype', 'remote_ip_prefix', 'tenant_id',
'security_group_id', 'remote_group_id']
ret = {}
for field in fields:
ret[field] = r.get(field)
ret['id'] = uuidutils.generate_uuid()
self._fake_security_group_rules[ret['id']] = ret
return {'security_group_rules': [ret]}
def show_security_group(self, security_group, **_params):
try:
sg = self._fake_security_groups[security_group]
except KeyError:
msg = 'Security Group %s not found' % security_group
raise n_exc.NeutronClientException(message=msg, status_code=404)
for security_group_rule in self._fake_security_group_rules.values():
if security_group_rule['security_group_id'] == sg['id']:
sg['security_group_rules'].append(security_group_rule)
return {'security_group': sg}
def show_security_group_rule(self, security_group_rule, **_params):
try:
return {'security_group_rule':
self._fake_security_group_rules[security_group_rule]}
except KeyError:
msg = 'Security Group rule %s not found' % security_group_rule
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_network(self, network, **_params):
try:
return {'network':
self._fake_networks[network]}
except KeyError:
msg = 'Network %s not found' % network
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_port(self, port, **_params):
try:
return {'port':
self._fake_ports[port]}
except KeyError:
msg = 'Port %s not found' % port
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_subnet(self, subnet, **_params):
try:
return {'subnet':
self._fake_subnets[subnet]}
except KeyError:
msg = 'Subnet %s not found' % subnet
raise n_exc.NeutronClientException(message=msg, status_code=404)
def list_security_groups(self, **_params):
ret = []
for security_group in self._fake_security_groups.values():
names = _params.get('name')
if names:
if not isinstance(names, list):
names = [names]
for name in names:
if security_group.get('name') == name:
ret.append(security_group)
ids = _params.get('id')
if ids:
if not isinstance(ids, list):
ids = [ids]
for id in ids:
if security_group.get('id') == id:
ret.append(security_group)
elif not (names or ids):
ret.append(security_group)
return {'security_groups': ret}
def list_networks(self, **_params):
# neutronv2/api.py _get_available_networks calls this assuming
# search_opts filter "shared" is implemented and not ignored
shared = _params.get("shared", None)
if shared:
return {'networks': []}
else:
return {'networks':
[network for network in self._fake_networks.values()]}
def list_ports(self, **_params):
ret = []
device_id = _params.get('device_id')
for port in self._fake_ports.values():
if device_id:
if port['device_id'] in device_id:
ret.append(port)
else:
ret.append(port)
return {'ports': ret}
def list_subnets(self, **_params):
return {'subnets':
[subnet for subnet in self._fake_subnets.values()]}
def list_floatingips(self, **_params):
return {'floatingips': []}
def delete_security_group(self, security_group):
self.show_security_group(security_group)
ports = self.list_ports()
for port in ports.get('ports'):
for sg_port in port['security_groups']:
if sg_port == security_group:
msg = ('Unable to delete Security group %s in use'
% security_group)
raise n_exc.NeutronClientException(message=msg,
status_code=409)
del self._fake_security_groups[security_group]
def delete_security_group_rule(self, security_group_rule):
self.show_security_group_rule(security_group_rule)
del self._fake_security_group_rules[security_group_rule]
def delete_network(self, network):
self.show_network(network)
self._check_ports_on_network(network)
for subnet in self._fake_subnets.values():
if subnet['network_id'] == network:
del self._fake_subnets[subnet['id']]
del self._fake_networks[network]
def delete_port(self, port):
self.show_port(port)
del self._fake_ports[port]
def update_port(self, port, body=None):
self.show_port(port)
self._fake_ports[port].update(body['port'])
return {'port': self._fake_ports[port]}
def list_extensions(self, **_parms):
return {'extensions': []}
def _check_ports_on_network(self, network):
ports = self.list_ports()
for port in ports.get('ports'):
if port['network_id'] == network:
msg = ('Unable to complete operation on network %s. There is '
'one or more ports still in use on the network'
% network)
raise n_exc.NeutronClientException(message=msg, status_code=409)
def find_resource(self, resource, name_or_id, project_id=None,
cmd_resource=None, parent_id=None, fields=None):
if resource == 'security_group':
# lookup first by unique id
sg = self._fake_security_groups.get(name_or_id)
if sg:
return sg
# lookup by name, raise an exception on duplicates
res = None
for sg in self._fake_security_groups.values():
if sg['name'] == name_or_id:
if res:
raise n_exc.NeutronClientNoUniqueMatch(
resource=resource, name=name_or_id)
res = sg
if res:
return res
raise n_exc.NotFound("Fake %s '%s' not found." %
(resource, name_or_id))
|
|
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: Todd Giles ([email protected])
# Author: Tom Enos ([email protected])
"""FFT methods for computing / analyzing frequency response of audio.
This is simply a wrapper around FFT support in numpy.
Initial FFT code inspired from the code posted here:
http://www.raspberrypi.org/phpBB3/viewtopic.php?t=35838&p=454041
Optimizations from work by Scott Driscoll:
http://www.instructables.com/id/Raspberry-Pi-Spectrum-Analyzer-with-RGB-LED-Strip-/
Third party dependencies:
numpy: for FFT calculation - http://www.numpy.org/
"""
import ConfigParser
import logging
import os.path
from numpy import *
class FFT(object):
def __init__(self,
chunk_size,
sample_rate,
num_bins,
min_frequency,
max_frequency,
custom_channel_mapping,
custom_channel_frequencies,
input_channels=2):
"""
:param chunk_size: chunk size of audio data
:type chunk_size: int
:param sample_rate: audio file sample rate
:type sample_rate: int
:param num_bins: number of frequency bins (one per GPIO channel) to process
:type num_bins: int
:param input_channels: number of audio input channels to process for (default=2)
:type input_channels: int
:param min_frequency: lowest frequency for which a channel will be activated
:type min_frequency: float
:param max_frequency: max frequency for which a channel will be activated.
:type max_frequency: float
:param custom_channel_mapping: custom map of channels to different frequencies
:type custom_channel_mapping: list | int
:param custom_channel_frequencies: custom list of frequencies that should be
utilized for each channel
:type custom_channel_frequencies: list | int
"""
self.chunk_size = chunk_size
self.sample_rate = sample_rate
self.num_bins = num_bins
self.input_channels = input_channels
self.window = hanning(0)
self.piff = list()
self.min_frequency = min_frequency
self.max_frequency = max_frequency
self.custom_channel_mapping = custom_channel_mapping
self.custom_channel_frequencies = custom_channel_frequencies
self.frequency_limits = self.calculate_channel_frequency()
self.config = ConfigParser.RawConfigParser(allow_no_value=True)
self.config_filename = ""
def calculate_levels(self, data):
"""Calculate frequency response for each channel defined in frequency_limits
:param data: decoder.frames(), audio data for fft calculations
:type data: decoder.frames
:return:
:rtype: numpy.array
"""
if len(self.piff) < 1:
fl = array(self.frequency_limits)
self.piff = ((fl * self.chunk_size) / self.sample_rate).astype(int)
for a in range(len(self.piff)):
if self.piff[a][0] == self.piff[a][1]:
self.piff[a][1] += 1
# create a numpy array, taking just the left channel if stereo
data_stereo = frombuffer(data, dtype="int16")
if self.input_channels == 2:
# data has 2 bytes per channel
# pull out the even values, just using left channel
data = array(data_stereo[::2])
elif self.input_channels == 1:
data = data_stereo
# if you take an FFT of a chunk of audio, the edges will look like
# super high frequency cutoffs. Applying a window tapers the edges
# of each end of the chunk down to zero.
if len(data) != len(self.window):
self.window = hanning(len(data))
data = data * self.window
# Apply FFT - real data
fourier = fft.rfft(data)
# Remove the last element so the spectrum length is chunk_size / 2
fourier.resize(len(fourier) - 1)
# Calculate the power spectrum
power = abs(fourier) ** 2
cache_matrix = empty(self.num_bins, dtype='float64')
for pin in range(self.num_bins):
# Get the sum of the power array index corresponding to a
# particular frequency.
cache_matrix[pin] = sum(power[self.piff[pin][0]:self.piff[pin][1]])
# take the log10 of the resulting sum to approximate how human ears
# perceive sound levels
if all(cache_matrix == 0.0):
return cache_matrix
with errstate(divide='ignore'):
cache_matrix = where(cache_matrix > 0.0, log10(cache_matrix), 0)
return cache_matrix
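# Worked example of the bin lookup above (illustrative numbers, not defaults):
# with chunk_size=2048 and sample_rate=44100, a frequency limit of 200 Hz maps
# to index int(200 * 2048 / 44100) = 9 and 400 Hz maps to index 18, so that
# channel's level is the summed power of FFT bins 9 through 17.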
def calculate_channel_frequency(self):
"""Calculate frequency values
Calculate frequency values for each channel,
taking into account custom settings.
:return: frequency values for each channel
:rtype: list
"""
# How many channels do we need to calculate the frequency for
if self.custom_channel_mapping != 0 and len(self.custom_channel_mapping) == self.num_bins:
logging.debug("Custom Channel Mapping is being used: %s",
str(self.custom_channel_mapping))
channel_length = max(self.custom_channel_mapping)
else:
logging.debug("Normal Channel Mapping is being used.")
channel_length = self.num_bins
logging.debug("Calculating frequencies for %d channels.", channel_length)
octaves = (log(self.max_frequency / self.min_frequency)) / log(2)
logging.debug("octaves in selected frequency range ... %s", octaves)
octaves_per_channel = octaves / channel_length
frequency_limits = []
frequency_store = []
frequency_limits.append(self.min_frequency)
if self.custom_channel_frequencies != 0 and (
len(self.custom_channel_frequencies) >= channel_length + 1):
logging.debug("Custom channel frequencies are being used")
frequency_limits = self.custom_channel_frequencies
else:
logging.debug("Custom channel frequencies are not being used")
for pin in range(1, self.num_bins + 1):
frequency_limits.append(frequency_limits[-1]
* 10 ** (3 / (10 * (1 / octaves_per_channel))))
for pin in range(0, channel_length):
frequency_store.append((frequency_limits[pin], frequency_limits[pin + 1]))
logging.debug("channel %d is %6.2f to %6.2f ", pin, frequency_limits[pin],
frequency_limits[pin + 1])
# We have the frequencies; now map them if custom mapping is defined
if self.custom_channel_mapping != 0 and len(self.custom_channel_mapping) == self.num_bins:
frequency_map = []
for pin in range(0, self.num_bins):
mapped_channel = self.custom_channel_mapping[pin] - 1
mapped_frequency_set = frequency_store[mapped_channel]
mapped_frequency_set_low = mapped_frequency_set[0]
mapped_frequency_set_high = mapped_frequency_set[1]
logging.debug("mapped channel: " + str(mapped_channel) + " will hold LOW: "
+ str(mapped_frequency_set_low) + " HIGH: "
+ str(mapped_frequency_set_high))
frequency_map.append(mapped_frequency_set)
return frequency_map
else:
return frequency_store
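# Worked example of the spacing formula above (illustrative numbers): with
# min_frequency=20, max_frequency=20000 and 8 channels,
# octaves = log(1000) / log(2) ~= 9.97 and octaves_per_channel ~= 1.25, so each
# limit is the previous one multiplied by
# 10 ** (3 / (10 * (1 / 1.25))) ~= 2.36 (roughly 2 ** 1.25), spacing the
# channels logarithmically from 20 Hz up to roughly 20 kHz.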
def compare_config(self, cache_filename):
"""
Compare the current configuration used to generate fft to a saved
copy of the data that was used to generate the fft data for an
existing cache file
:param cache_filename: path and name of cache file
:type cache_filename: str
"""
# TODO(mdietz): Setting this here is so dumb
self.config_filename = cache_filename.replace(".sync", ".cfg")
if not os.path.isfile(self.config_filename):
logging.warn("No cached config data found")
return False
else:
has_config = True
with open(self.config_filename) as f:
self.config.readfp(f)
fft_cache = dict()
fft_current = dict()
try:
fft_cache["chunk_size"] = self.config.getint("fft", "chunk_size")
fft_cache["sample_rate"] = self.config.getint("fft", "sample_rate")
fft_cache["num_bins"] = self.config.getint("fft", "num_bins")
fft_cache["min_frequency"] = self.config.getfloat("fft",
"min_frequency")
fft_cache["max_frequency"] = self.config.getfloat("fft",
"max_frequency")
temp = [int(channel) for channel in
self.config.get("fft",
"custom_channel_mapping").split(',')]
if len(temp) == 1:
fft_cache["custom_channel_mapping"] = temp[0]
else:
fft_cache["custom_channel_mapping"] = temp
temp = [int(channel) for channel in
self.config.get("fft",
"custom_channel_frequencies").split(',')]
if len(temp) == 1:
fft_cache["custom_channel_frequencies"] = temp[0]
else:
fft_cache["custom_channel_frequencies"] = temp
fft_cache["input_channels"] = self.config.getint("fft",
"input_channels")
except ConfigParser.Error:
has_config = False
fft_current["chunk_size"] = self.chunk_size
fft_current["sample_rate"] = self.sample_rate
fft_current["num_bins"] = self.num_bins
fft_current["min_frequency"] = self.min_frequency
fft_current["max_frequency"] = self.max_frequency
fft_current["custom_channel_mapping"] = self.custom_channel_mapping
freq = self.custom_channel_frequencies
fft_current["custom_channel_frequencies"] = freq
fft_current["input_channels"] = self.input_channels
if fft_cache != fft_current:
has_config = False
logging.warn("Cached config data does not match")
return has_config
def save_config(self):
"""Save the current configuration used to generate the fft data"""
if not self.config.has_section("fft"):
self.config.add_section("fft")
self.config.set('fft', 'chunk_size', str(self.chunk_size))
self.config.set('fft', 'sample_rate', str(self.sample_rate))
self.config.set('fft', 'num_bins', str(self.num_bins))
self.config.set('fft', 'min_frequency', str(self.min_frequency))
self.config.set('fft', 'max_frequency', str(self.max_frequency))
if isinstance(self.custom_channel_mapping, list):
self.config.set('fft', 'custom_channel_mapping',
str(self.custom_channel_mapping)[1:-1])
else:
self.config.set('fft', 'custom_channel_mapping',
str(self.custom_channel_mapping))
if isinstance(self.custom_channel_frequencies, list):
self.config.set('fft', 'custom_channel_frequencies',
str(self.custom_channel_frequencies)[1:-1])
else:
self.config.set('fft', 'custom_channel_frequencies',
str(self.custom_channel_frequencies))
self.config.set('fft', 'input_channels', str(self.input_channels))
with open(self.config_filename, "w") as f:
self.config.write(f)
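# Example usage (a minimal sketch; the chunk and sample values below are
# assumptions, not values required by this module):
#
#     fft_calc = FFT(chunk_size=2048, sample_rate=44100, num_bins=8,
#                    min_frequency=20, max_frequency=15000,
#                    custom_channel_mapping=0, custom_channel_frequencies=0)
#     levels = fft_calc.calculate_levels(chunk)  # chunk: raw 16-bit PCM bytes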
|
|
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
from django.test import RequestFactory
from tastypie.resources import ALL_WITH_RELATIONS, ALL
from tastypie.resources import ModelResource
from tastypie.exceptions import BadRequest
from django.conf import settings
from django.conf.urls import *
from django.http import HttpResponse, QueryDict
import shortuuid
from tastypie import fields
from django.contrib.auth.tokens import default_token_generator
from tastypie.exceptions import ApiFieldError, ImmediateHttpResponse
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.forms import PasswordResetForm
from cbh_core_model.models import CustomFieldConfig
from cbh_core_model.models import DataType
from cbh_core_model.models import Project
from cbh_core_model.models import ProjectType
from cbh_core_model.models import SkinningConfig
from cbh_core_model.models import Invitation
from cbh_core_ws.authorization import ProjectListAuthorization, InviteAuthorization, viewer_projects, ProjectPermissionAuthorization
from tastypie.authentication import SessionAuthentication
from tastypie.paginator import Paginator
from cbh_core_ws.serializers import CustomFieldsSerializer
from django.db.models import Prefetch
from tastypie.utils.mime import build_content_type
from django.http import HttpResponseRedirect
from cbh_core_ws.authorization import get_all_project_ids_for_user
from django.views.generic import FormView, View
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login as auth_login, logout as auth_logout
from tastypie.authorization import Authorization
from tastypie import http
from django.contrib.auth.views import password_reset
from django.db import IntegrityError
try:
# django >= 1.7
from django.apps import apps
get_model = apps.get_model
except ImportError:
# django < 1.7
from django.db.models import get_model
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
from BeautifulSoup import BeautifulSoup
try:
import defusedxml.lxml as lxml
except ImportError:
lxml = None
try:
WS_DEBUG = settings.WS_DEBUG
except AttributeError:
WS_DEBUG = False
from django.contrib.auth import get_user_model
import inflection
import six
import importlib
from django.views.generic import TemplateView
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.contrib.auth.forms import PasswordResetForm, loader, get_current_site, urlsafe_base64_encode, force_bytes
from urllib import urlencode
from django.core.mail import EmailMessage
from django.contrib.auth.models import Permission
import re
from django_hstore import hstore
class CBHDictField(fields.ApiField):
"""
A dictionary field.
"""
dehydrated_type = 'dict'
help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}"
def convert(self, value):
try:
return dict(value)
except ValueError:
return hstore.SerializedDictionaryField()._deserialize_dict(value)
class UserHydrate(object):
def hydrate_created_by(self, bundle):
User = get_user_model()
if bundle.obj.id:
pass
else:
user = User.objects.get(pk=bundle.request.user.pk)
bundle.obj.created_by = user
return bundle
class CSRFExemptMixin(object):
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(CSRFExemptMixin, self).dispatch(*args, **kwargs)
def get_field_name_from_key(key):
return key.replace(u"__space__", u" ")
def get_key_from_field_name(name):
return name.replace(u" ", u"__space__")
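# For example, the field name u"assay name" is stored under the key
# u"assay__space__name" and recovered again via the two helpers above.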
from django.middleware.csrf import get_token
class SimpleResourceURIField(fields.ApiField):
"""
Provide just the id field as a resource URI
"""
dehydrated_type = 'string'
is_related = False
self_referential = False
help_text = 'A related resource. Can be either a URI or set of nested resource data.'
def __init__(self, to, attribute, full=False, related_name=None, default=fields.NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None, use_in='all'):
"""
"""
super(SimpleResourceURIField, self).__init__(attribute=attribute, default=default, null=null, blank=blank,
readonly=readonly, unique=unique, help_text=help_text, use_in=use_in)
self.related_name = related_name
self.to = to
self._to_class = None
self._rel_resources = {}
self.api_name = None
self.resource_name = None
if self.to == 'self':
self.self_referential = True
def contribute_to_class(self, cls, name):
super(SimpleResourceURIField, self).contribute_to_class(cls, name)
# Check if we're self-referential and hook it up.
# We can't do this quite like Django because there's no ``AppCache``
# here (which I think we should avoid as long as possible).
if self.self_referential or self.to == 'self':
self._to_class = cls
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
if value is None:
return None
cls = self.to_class()
resource_uri = cls.get_resource_uri()
return "%s/%d" % (resource_uri, value)
def hydrate(self, bundle):
"""
Takes data stored in the bundle for the field and returns it. Used for
taking simple data and building an instance object.
"""
if self.readonly:
return None
if self.instance_name not in bundle.data:
if self.is_related and not self.is_m2m:
# We've got an FK (or alike field) & a possible parent object.
# Check for it.
if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name):
return bundle.related_obj
if self.blank:
return None
if self.attribute:
try:
val = getattr(bundle.obj, self.attribute, None)
if val is not None:
return val
except ObjectDoesNotExist:
pass
if self.instance_name:
try:
if hasattr(bundle.obj, self.instance_name):
return getattr(bundle.obj, self.instance_name)
except ObjectDoesNotExist:
pass
if self.has_default():
if callable(self._default):
return self._default()
return self._default
if self.null:
return None
raise ApiFieldError(
"The '%s' field has no data and doesn't allow a default or null value." % self.instance_name)
# New code to return the primary key parsed from a resource URI
value = bundle.data[self.instance_name]
if value is None:
return value
if str(value).endswith("/"):
value = value[:-1]
data = str(value).split("/")
return int(data[len(data) - 1])
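# Going the other way (hypothetical URI), hydrate() turns a posted value such
# as "/api/v1/cbh_projects/12/" into the integer primary key 12 by stripping
# the trailing slash and taking the last path segment.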
@property
def to_class(self):
# We need to be lazy here, because when the metaclass constructs the
# Resources, other classes may not exist yet.
# That said, memoize this so we never have to relookup/reimport.
if self._to_class:
return self._to_class
if not isinstance(self.to, six.string_types):
self._to_class = self.to
return self._to_class
# It's a string. Let's figure it out.
if '.' in self.to:
# Try to import.
module_bits = self.to.split('.')
module_path, class_name = '.'.join(
module_bits[:-1]), module_bits[-1]
module = importlib.import_module(module_path)
else:
# We've got a bare class name here, which won't work (No AppCache
# to rely on). Try to throw a useful error.
raise ImportError(
"Tastypie requires a Python-style path (<module.module.Class>) to lazy load related resources. Only given '%s'." % self.to)
self._to_class = getattr(module, class_name, None)
if self._to_class is None:
raise ImportError("Module '%s' does not appear to have a class called '%s'." % (
module_path, class_name))
return self._to_class
class Index(TemplateView):
template_name = 'dist/index.html' # or define get_template_names()
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
from django.middleware.csrf import get_token
csrf_token = get_token(request)
return self.render_to_response(context)
#-------------------------------------------------------------------------
class ProjectPermissionResource(ModelResource):
"""
Allows updating of user permissions - project owners can change who views, edits and owns their project
Data is retrieved using the codename of the permission.
"""
users = fields.ToManyField("cbh_core_ws.resources.UserResource",attribute="user_set")
# groups = fields.ToManyField(GroupResource, attribute="group_set")
codename = fields.CharField(readonly=True)
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<codename>[\w\d_.-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
class Meta:
queryset = Permission.objects.all()
resource_name = 'cbh_permissions'
allowed_methods = ["get", "post", "patch", "put"]
authentication = SessionAuthentication()
authorization = ProjectPermissionAuthorization()
detail_uri_name = 'codename'
def save(self, bundle, skip_errors=False):
"""Ensure that the m2m bundle is hydrated before continuing as this is needed for the authorization"""
if bundle.via_uri:
return bundle
self.is_valid(bundle)
if bundle.errors and not skip_errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
m2m_bundle = self.hydrate_m2m(bundle)
# Check if they're authorized.
if bundle.obj.pk:
self.authorized_update_detail(self.get_object_list(bundle.request), m2m_bundle)
else:
self.authorized_create_detail(self.get_object_list(bundle.request), m2m_bundle)
# Save FKs just in case.
self.save_related(bundle)
# Save the main object.
obj_id = self.create_identifier(bundle.obj)
if obj_id not in bundle.objects_saved or bundle.obj._state.adding:
bundle.obj.save()
bundle.objects_saved.add(obj_id)
# Now pick up the M2M bits.
self.save_m2m(m2m_bundle)
return bundle
#-------------------------------------------------------------------------
class Login( CSRFExemptMixin, FormView):
form_class = AuthenticationForm
template_name = "cbh_chembl_ws_extension/login.html"
logout = None
def get(self, request, *args, **kwargs):
from django.middleware.csrf import get_token
csrf_token = get_token(request)
context = self.get_context_data(
form=self.get_form(self.get_form_class()))
redirect_to = settings.LOGIN_REDIRECT_URL
'''Borrowed from django base detail view'''
if "django_webauth" in settings.INSTALLED_APPS:
context["webauth_login"] = True
username = request.META.get('REMOTE_USER', None)
if not username:
# Here we check if this was a redirect after logout in which
# case we show the button to log out of webauth entirely
username = request.META.get('HTTP_X_WEBAUTH_USER', None)
if username:
context["logout"] = True
else:
context["password_login"] = True
if request.user.is_authenticated():
return HttpResponseRedirect(redirect_to)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance with the passed
POST variables and then checked for validity.
"""
redirect_to = settings.LOGIN_REDIRECT_URL
'''Borrowed from django base detail view'''
from django.middleware.csrf import get_token
csrf_token = get_token(request)
if request.user.is_authenticated():
#The user has pressed back in their browser and therefore should be redirected
return HttpResponseRedirect(redirect_to)
form = self.get_form(self.get_form_class())
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
redirect_to = settings.LOGIN_REDIRECT_URL
auth_login(self.request, form.get_user())
if self.request.session.test_cookie_worked():
self.request.session.delete_test_cookie()
# return self.render_to_response(self.get_context_data())
return HttpResponseRedirect(redirect_to)
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
# def dispatch(self, request, *args, **kwargs):
# request.session.set_test_cookie()
# return super(Login, self).dispatch(request, *args, **kwargs)
class Logout(View):
def get(self, request, *args, **kwargs):
auth_logout(request)
return HttpResponseRedirect(settings.LOGOUT_REDIRECT_URL)
def build_content_type(format, encoding='utf-8'):
"""
Appends character encoding to the provided format if not already present.
"""
if 'charset' in format:
return format
return "%s; charset=%s" % (format, encoding)
class SkinningResource(ModelResource):
'''URL resource for pulling out sitewide skinning config'''
class Meta:
always_return_data = True
queryset = SkinningConfig.objects.all()
resource_name = 'cbh_skinning'
#authorization = Authorization()
include_resource_uri = True
allowed_methods = ['get', 'post', 'put']
default_format = 'application/json'
authentication = SessionAuthentication()
class TemplateProjectFieldResource(ModelResource):
"""Provides the schema information about a field that is required by front end apps"""
class Meta:
queryset = get_model("cbh_core_model","PinnedCustomField").objects.all()
always_return_data = True
resource_name = 'cbh_template_fields'
include_resource_uri = False
allowed_methods = ['get']
default_format = 'application/json'
authentication = SessionAuthentication()
authorization = Authorization()
def dehydrate_id(self, bundle):
return None
def get_field_list(project_type_bundle):
if project_type_bundle.obj.saved_search_project_type:
return project_type_bundle.obj.SAVED_SEARCH_TEMPLATE
#elif project_type_bundle.obj.plate_map_project_type:
# return project_type_bundle.obj.PLATE_MAP_TEMPLATE
else:
for field in project_type_bundle.data["custom_field_config_template"]:
field.data["id"] = None
return [field.data for field in project_type_bundle.data["custom_field_config_template"]]
class ProjectTypeResource(ModelResource):
'''Resource for Project Type, specifies whether this is a chemical/inventory instance etc '''
copy_action_name = fields.CharField(default="Clone")
custom_field_config_template = fields.ToManyField("cbh_core_ws.resources.TemplateProjectFieldResource", attribute=lambda bundle: get_model("cbh_core_model","PinnedCustomField").objects.filter(custom_field_config_id=bundle.obj.custom_field_config_template_id) , full=True, readonly=True, null=True)
project_template = fields.DictField(default={})
def alter_list_data_to_serialize(self, request, data):
for bun in data["objects"]:
bun.data["project_template"] = {
"project_type": bun.data["resource_uri"],
"custom_field_config": {
"project_data_fields": get_field_list(bun),
"name": ""
},
"name": ""
}
return data
def dehydrate_copy_action_name(self, bundle):
if bundle.obj.show_compounds:
return "Clone / Add Structure"
else:
return "Clone Item"
class Meta:
always_return_data = True
queryset = ProjectType.objects.all()
resource_name = 'cbh_project_types'
authorization = Authorization()
include_resource_uri = True
allowed_methods = ['get', 'post', 'patch', 'put']
default_format = 'application/json'
authentication = SessionAuthentication()
filtering = {
"saved_search_project_type": ALL,
"plate_map_project_type": ALL
}
class CustomFieldConfigResource(ModelResource):
'''Resource for Custom Field Config '''
class Meta:
always_return_data = True
queryset = CustomFieldConfig.objects.all()
resource_name = 'cbh_custom_field_configs'
#authorization = ProjectListAuthorization()
include_resource_uri = True
allowed_methods = ['get', 'post', 'put']
default_format = 'application/json'
authentication = SessionAuthentication()
filtering = {
"name": ALL_WITH_RELATIONS
}
class DataTypeResource(ModelResource):
'''Resource for data types'''
plural = fields.CharField(null=True)
class Meta:
always_return_data = True
queryset = DataType.objects.all()
resource_name = 'cbh_data_types'
authorization = Authorization()
include_resource_uri = True
allowed_methods = ['get', 'post', 'patch', 'put']
default_format = 'application/json'
authentication = SessionAuthentication()
filtering = {
"name": ALL_WITH_RELATIONS
}
def dehydrate_plural(self, bundle):
return inflection.pluralize(bundle.obj.name)
class MyPasswordResetForm(PasswordResetForm):
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None, html_email_template_name=None, extra_email_context={}, user=None):
"""
Generates a one-use only link for resetting password and sends to the
user.
"""
from django.core.mail import send_mail
email = self.cleaned_data["email"]
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
c = {
'email': user.email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
'extra' : extra_email_context,
}
subject = loader.render_to_string(subject_template_name, c)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
html_email = loader.render_to_string(email_template_name, c)
soup = BeautifulSoup(html_email)
email = soup.getText()
send_mail(subject, email, from_email, [user.email], html_message=html_email, fail_silently=False)
class InvitationResource(UserHydrate, ModelResource):
'''Resource for Invitation model. This will setup creation of the invite email and new user '''
created_by = fields.ForeignKey(
"cbh_core_ws.resources.UserResource", 'created_by', full=True)
class Meta:
queryset = Invitation.objects.all()
resource_name = 'invitations'
authorization = InviteAuthorization()
include_resource_uri = True
allowed_methods = ['get', 'post', 'put']
default_format = 'application/json'
authentication = SessionAuthentication()
always_return_data = True
filtering = {
"email": ALL_WITH_RELATIONS
}
def get_form(self, email, new_user, data, created, request, email_template_name, subject_template_name):
server = settings.SERVER_EMAIL
form = MyPasswordResetForm(QueryDict(urlencode({"email": email})))
hostname = request.META["HTTP_ORIGIN"]
if form.is_valid():
form.users_cache = [new_user,]
opts = {
'use_https': request.is_secure(),
'token_generator': default_token_generator,
'from_email': server,
'user' : new_user,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
'extra_email_context': {'hostname':hostname, 'invite': data.data, 'login_url' : settings.LOGIN_URL, },
}
form.save(**opts)
else:
raise BadRequest("Email not valid")
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
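        # Only a newly created invitation (HttpCreated) triggers the user
        # lookup/creation, project permission granting and notification e-mail
        # below; every other response is serialized unchanged.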
if response_class == http.HttpCreated:
email = data.data["email"]
if email.endswith("ox.ac.uk"):
#send via webauth
raise BadRequest("We do not yet support inviting users at Oxford to projects. This feature will come soon.")
else:
UserObj = get_user_model()
new_user, created = UserObj.objects.get_or_create(email=email, username=email)
logger.info(data.data)
for perm in data.data["projects_selected"]:
p = Project.objects.get(id=perm["id"])
p.make_viewer(new_user)
p.save()
data.data["message"] = "Invite sent successfully to %s, would you like to invite anyone else?" % email
email_template_name = 'cbh_core_ws/email_new_user.html'
subject_template_name = 'cbh_core_ws/subject_new_user.html'
if not created:
projects_with_reader_access = viewer_projects(new_user)
all_projects_equal = True
all_selected_ids = set([new_proj["id"] for new_proj in data.data["projects_selected"]])
new_ids = all_selected_ids - set(projects_with_reader_access)
if(len(new_ids) > 0):
email_template_name = 'cbh_core_ws/email_project_access_changed.html'
subject_template_name = 'cbh_core_ws/subject_project_access_changed.html'
all_projects_equal = False
data.data["message"] = "Existing user %s invited to new projects, would you like to invite anyone else?" % email
else:
if not data.data.get("remind", False):
raise ImmediateHttpResponse(http.HttpConflict('{"error": "User already exists, do you wish to invite again?"}'))
if new_user.has_usable_password():
email_template_name = 'cbh_core_ws/email_reminder.html'
subject_template_name = 'cbh_core_ws/subject_reminder.html'
data.data["message"] = "Sign-up reminder sent to %s, would you like to invite anyone else?" % email
else:
email_template_name = 'cbh_core_ws/email_reminder_already_logged_on.html'
subject_template_name = 'cbh_core_ws/subject_reminder.html'
data.data["message"] = "User %s reminded to look at these projects, would you like to invite anyone else?" % email
form = self.get_form( email, new_user, data, created, request, email_template_name, subject_template_name)
serialized = self.serialize(request, data, desired_format)
rc = response_class(content=serialized, content_type=build_content_type(
desired_format), **response_kwargs)
return rc
from django.contrib.auth.models import User
class UserResource(ModelResource):
'''Displays information about the User's privileges and personal data'''
can_view_chemreg = fields.BooleanField(default=True)
can_view_assayreg = fields.BooleanField(default=True)
is_logged_in = fields.BooleanField(default=False)
can_create_and_own_projects = fields.BooleanField(default=False)
display_name = fields.CharField(default="")
class Meta:
filtering = {
"username": ALL_WITH_RELATIONS
}
queryset = User.objects.all()
resource_name = 'users'
allowed_methods = ["get", "post"]
excludes = ['email', 'password', 'is_active']
authentication = SessionAuthentication()
authorization = Authorization()
def apply_authorization_limits(self, request, object_list):
return object_list.get(pk=request.user.id)
def get_object_list(self, request):
# return super(UserResource,
# self).get_object_list(request).filter(pk=request.user.id)
return super(UserResource, self).get_object_list(request)
def dehydrate_display_name(self, bundle):
if bundle.obj.first_name:
return "%s %s" % (bundle.obj.first_name, bundle.obj.last_name)
else:
return bundle.obj.username
def dehydrate_can_create_and_own_projects(self, bundle):
"""Internal users (denoted by their email pattern match) are allowed to add and own projects"""
if bundle.obj.is_superuser:
return True
perms = bundle.obj.get_all_permissions()
if "cbh_core_model.add_project" in perms:
return True
return False
def dehydrate_is_logged_in(self, bundle):
if bundle.obj.id == bundle.request.user.id:
return True
return False
def dehydrate_can_view_chemreg(self, bundle):
'''The cbh_core_model.no_chemreg role in the Django admin is used to
deny access to chemreg. As superusers have all permissions by
default they would be denied access therefore we check for superuser status and allow access'''
if bundle.obj.is_superuser:
return True
perms = bundle.obj.get_all_permissions()
if "cbh_core_model.no_chemreg" in perms:
return False
return True
def dehydrate_can_view_assayreg(self, bundle):
'''The cbh_core_model.no_assayreg role in the Django admin is used to
deny access to assayreg. As superusers have all permissions by
default they would be denied access therefore we check for superuser status and allow access'''
if bundle.obj.is_superuser:
return True
perms = bundle.obj.get_all_permissions()
if "cbh_core_model.no_assayreg" in perms:
return False
return True
class CoreProjectResource(ModelResource):
project_type = fields.ForeignKey(
ProjectTypeResource, 'project_type', blank=False, null=False, full=True)
custom_field_config = fields.ForeignKey(
CustomFieldConfigResource, 'custom_field_config', blank=False, null=True, full=True)
class Meta:
queryset = Project.objects.all()
authentication = SessionAuthentication()
paginator_class = Paginator
allowed_methods = ['get']
resource_name = 'cbh_projects'
authorization = ProjectListAuthorization()
include_resource_uri = True
default_format = 'application/json'
#serializer = Serializer()
serializer = CustomFieldsSerializer()
filtering = {
"project_key": ALL_WITH_RELATIONS,
}
def get_object_list(self, request):
return super(CoreProjectResource, self).get_object_list(request).prefetch_related(Prefetch("project_type")).order_by('-modified')
def alter_list_data_to_serialize(self, request, bundle):
'''Here we append a list of tags to the data of the GET request if the
search fields are required'''
userres = UserResource()
userbundle = userres.build_bundle(obj=request.user, request=request)
userbundle = userres.full_dehydrate(userbundle)
        bundle['user'] = userbundle.data
        return bundle
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
rc = response_class(content=serialized, content_type=build_content_type(
desired_format), **response_kwargs)
if(desired_format == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'):
rc['Content-Disposition'] = 'attachment; filename=project_data_explanation.xlsx'
return rc
|
|
import operator
from functools import partial, wraps
from itertools import chain, count
from collections import Iterator
from toolz import merge, unique, curry
from .optimize import cull, fuse
from .utils import concrete
from . import base
from .compatibility import apply
from . import threaded
__all__ = ['compute', 'do', 'value', 'Value']
def flat_unique(ls):
"""Flatten ``ls``, filter by unique id, and return a list"""
return list(unique(chain.from_iterable(ls), key=id))
def unzip(ls, nout):
"""Unzip a list of lists into ``nout`` outputs."""
out = list(zip(*ls))
if not out:
out = [()] * nout
return out
def to_task_dasks(expr):
"""Normalize a python object and extract all sub-dasks.
- Replace ``Values`` with their keys
- Convert literals to things the schedulers can handle
- Extract dasks from all enclosed values
Parameters
----------
expr : object
The object to be normalized. This function knows how to handle
``Value``s, as well as most builtin python types.
Returns
-------
task : normalized task to be run
dasks : list of dasks that form the dag for this task
Examples
--------
>>> a = value(1, 'a')
>>> b = value(2, 'b')
>>> task, dasks = to_task_dasks([a, b, 3])
>>> task # doctest: +SKIP
(list, ['a', 'b', 3])
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
>>> task, dasks = to_task_dasks({a: 1, b: 2})
>>> task # doctest: +SKIP
(dict, (list, [(list, ['a', 1]), (list, ['b', 2])]))
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
"""
if isinstance(expr, Value):
return expr.key, expr._dasks
if isinstance(expr, base.Base):
name = tokenize(expr, True)
keys = expr._keys()
dsk = expr._optimize(expr.dask, keys)
dsk[name] = (expr._finalize, expr, (concrete, keys))
return name, [dsk]
if isinstance(expr, tuple) and type(expr) != tuple:
return expr, []
if isinstance(expr, (Iterator, list, tuple, set)):
args, dasks = unzip(map(to_task_dasks, expr), 2)
args = list(args)
dasks = flat_unique(dasks)
# Ensure output type matches input type
if isinstance(expr, (list, tuple, set)):
return (type(expr), args), dasks
else:
return args, dasks
if isinstance(expr, dict):
args, dasks = to_task_dasks(list([k, v] for k, v in expr.items()))
return (dict, args), dasks
return expr, []
tokens = ('_{0}'.format(i) for i in count(1))
def tokenize(*args, **kwargs):
"""Mapping function from task -> consistent name.
Parameters
----------
args : object
Python objects that summarize the task.
pure : boolean, optional
If True, a consistent hash function is tried on the input. If this
fails, then a unique identifier is used. If False (default), then a
unique identifier is always used.
"""
if kwargs.pop('pure', False):
return base.tokenize(*args)
return next(tokens)
def applyfunc(func, args, kwargs, pure=False):
"""Create a Value by applying a function to args.
Given a function and arguments, return a Value that represents the result
of that computation."""
args, dasks = unzip(map(to_task_dasks, args), 2)
dasks = flat_unique(dasks)
if kwargs:
func = partial(func, **kwargs)
name = tokenize(func, *args, pure=pure)
dasks.append({name: (func,) + args})
return Value(name, dasks)
@curry
def do(func, pure=False):
"""Wraps a function so that it outputs a ``Value``.
Examples
--------
Can be used as a decorator:
>>> @do
... def add(a, b):
... return a + b
>>> res = add(1, 2)
>>> type(res) == Value
True
>>> res.compute()
3
For other cases, it may be cleaner to call ``do`` on a function at call
time:
>>> res2 = do(sum)([res, 2, 3])
>>> res2.compute()
8
``do`` also accepts an optional keyword ``pure``. If False (default), then
subsequent calls will always produce a different ``Value``. This is useful
for non-pure functions (such as ``time`` or ``random``).
>>> from random import random
>>> out1 = do(random)()
>>> out2 = do(random)()
>>> out1.key == out2.key
False
If you know a function is pure (output only depends on the input, with no
global state), then you can set ``pure=True``. This will attempt to apply a
    consistent name to the output, but will fall back on the same behavior as
``pure=False`` if this fails.
>>> @do(pure=True)
... def add(a, b):
... return a + b
>>> out1 = add(1, 2)
>>> out2 = add(1, 2)
>>> out1.key == out2.key
True
"""
@wraps(func)
def _dfunc(*args, **kwargs):
return applyfunc(func, args, kwargs, pure=pure)
return _dfunc
def optimize(dsk, keys):
dsk2 = cull(dsk, keys)
return fuse(dsk2)
def compute(*args, **kwargs):
"""Evaluate several ``Value``s at once.
Note that the only difference between this function and
``dask.base.compute`` is that this implicitly converts python objects to
``Value``s, allowing for collections of dask objects to be computed.
Examples
--------
>>> a = value(1)
>>> b = a + 2
>>> c = a + 3
>>> compute(b, c) # Compute both simultaneously
(3, 4)
>>> compute(a, [b, c]) # Works for lists of Values
(1, [3, 4])
"""
args = [value(a) for a in args]
return base.compute(*args, **kwargs)
def right(method):
"""Wrapper to create 'right' version of operator given left version"""
def _inner(self, other):
return method(other, self)
return _inner
class Value(base.Base):
"""Represents a value to be computed by dask.
Equivalent to the output from a single key in a dask graph.
"""
__slots__ = ('_key', '_dasks')
_optimize = staticmethod(optimize)
_finalize = staticmethod(lambda a, r: r[0])
_default_get = staticmethod(threaded.get)
def __init__(self, name, dasks):
object.__setattr__(self, '_key', name)
object.__setattr__(self, '_dasks', dasks)
@property
def dask(self):
return merge(*self._dasks)
@property
def key(self):
return self._key
def _keys(self):
return [self.key]
def __repr__(self):
return "Value({0})".format(repr(self.key))
def __hash__(self):
return hash(self.key)
def __dir__(self):
return list(self.__dict__.keys())
def __getattr__(self, attr):
if not attr.startswith('_'):
return do(getattr, True)(self, attr)
else:
raise AttributeError("Attribute {0} not found".format(attr))
def __setattr__(self, attr, val):
raise TypeError("Value objects are immutable")
def __setitem__(self, index, val):
raise TypeError("Value objects are immutable")
def __iter__(self):
raise TypeError("Value objects are not iterable")
def __call__(self, *args, **kwargs):
return do(apply)(self, args, kwargs)
def __bool__(self):
raise TypeError("Truth of Value objects is not supported")
__nonzero__ = __bool__
__abs__ = do(operator.abs, True)
__add__ = do(operator.add, True)
__and__ = do(operator.and_, True)
__div__ = do(operator.floordiv, True)
__eq__ = do(operator.eq, True)
__floordiv__ = do(operator.floordiv, True)
__ge__ = do(operator.ge, True)
__getitem__ = do(operator.getitem, True)
__gt__ = do(operator.gt, True)
__index__ = do(operator.index, True)
__invert__ = do(operator.invert, True)
__le__ = do(operator.le, True)
__lshift__ = do(operator.lshift, True)
__lt__ = do(operator.lt, True)
__mod__ = do(operator.mod, True)
__mul__ = do(operator.mul, True)
__ne__ = do(operator.ne, True)
__neg__ = do(operator.neg, True)
__or__ = do(operator.or_, True)
__pos__ = do(operator.pos, True)
__pow__ = do(operator.pow, True)
__radd__ = do(right(operator.add), True)
__rand__ = do(right(operator.and_), True)
__rdiv__ = do(right(operator.floordiv), True)
__rfloordiv__ = do(right(operator.floordiv), True)
__rlshift__ = do(right(operator.lshift), True)
__rmod__ = do(right(operator.mod), True)
__rmul__ = do(right(operator.mul), True)
__ror__ = do(right(operator.or_), True)
__rpow__ = do(right(operator.pow), True)
__rrshift__ = do(right(operator.rshift), True)
__rshift__ = do(operator.rshift, True)
__rsub__ = do(right(operator.sub), True)
__rtruediv__ = do(right(operator.truediv), True)
__rxor__ = do(right(operator.xor), True)
__sub__ = do(operator.sub, True)
__truediv__ = do(operator.truediv, True)
__xor__ = do(operator.xor, True)
base.normalize_token.register(Value, lambda a: a.key)
def value(val, name=None):
"""Create a ``Value`` from a python object.
Parameters
----------
val : object
Object to be wrapped.
name : string, optional
Name to be used in the resulting dask.
Examples
--------
>>> a = value([1, 2, 3])
>>> a.compute()
[1, 2, 3]
Values can act as a proxy to the underlying object. Many operators are
supported:
>>> (a + [1, 2]).compute()
[1, 2, 3, 1, 2]
>>> a[1].compute()
2
Method and attribute access also works:
>>> a.count(2).compute()
1
Note that if a method doesn't exist, no error will be thrown until runtime:
>>> res = a.not_a_real_method()
>>> res.compute() # doctest: +SKIP
AttributeError("'list' object has no attribute 'not_a_real_method'")
"""
if isinstance(val, Value):
return val
name = name or tokenize(val, True)
task, dasks = to_task_dasks(val)
dasks.append({name: task})
return Value(name, dasks)
|
|
import sys
from ship import *
from pygame.locals import *
class Game:
def __init__(self):
# initialize game window, etc
pg.init()
pg.font.init()
pg.mixer.init()
self.screen = pg.display.set_mode((width, height), pg.FULLSCREEN)
pg.display.set_caption(Title)
self.clock = pg.time.Clock()
self.running = True
def load_state(self):
pass
def new(self):
# start a new game
self.all_sprites = pg.sprite.Group()
self.ship = Ship(self, 10, 10)
self.paused = False
self.run()
def run(self):
# Game loop
self.playing = True
while self.playing:
self.clock.tick(fps)
self.events()
if not self.paused:
self.update()
self.draw()
def button(self, naam1, naam2, x, y, w, h):
# image, image highlight, x pos, y pos, width, height
mouse = pg.mouse.get_pos()
        # if x pos + width is greater (pointer inside the button rect)
if x + w > mouse[0] > x and y + h > mouse[1] > y:
self.screen.blit(naam1, (x, y))
self.screen.blit(naam2, (x, y))
else:
self.screen.blit(naam1, (x, y))
def quit(self):
pg.quit()
sys.exit()
def update(self):
# Game loop - Update
self.all_sprites.update()
def instructions(self):
self.screen.blit(instbg_image, (0, 0))
pg.display.update()
def board_gen(self):
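        # Fills the middle three fifths of the screen (from width/5 to 4*width/5)
        # with a red/white checkerboard of width/33 by height/20 tiles, then polls
        # mouse clicks, refreshes the display and processes pending events.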
t = 0
x = width / 5
y = 0
self.screen.fill(black)
# obj_()
        while y <= height:
while x < (width / 5) * 4:
if t % 2 == 0:
                    self.screen.fill(red, (x, y, width / 33, height / 20))
t += 1
x += (width / 33)
else:
                    self.screen.fill(white, (x, y, width / 33, height / 20))
t += 1
x += (width / 33)
else:
t += 1
            y += (height / 20)
x = width / 5
for event in pg.event.get():
if event.type == pg.MOUSEBUTTONDOWN:
(mousex, mousey) = pg.mouse.get_pos()
pg.display.update()
self.events()
def draw_grid(self):
for x in range(136, width - 136, tilesize):
pg.draw.line(self.screen, light_grey, (x, 0), (x, height))
for y in range(0, height, tilesize):
pg.draw.line(self.screen, light_grey, (0, y), (width, y))
self.screen.blit(board_image, (0, 0))
self.screen.blit(map_image, (width / 5, height / 10))
self.button(menu1_image, menu2_image, 100, 100, 50, 50)
self.screen.blit(kaartn_image, (20, 20))
self.screen.blit(kaarts_image, (700, 20))
mouse = pg.mouse.get_pos()
if 700 + 80 > mouse[0] > 700 and 20 + 160 > mouse[1] > 20:
self.screen.blit(kaart1, (600, 20))
if 20 + 80 > mouse[0] > 20 and 20 + 160 > mouse[1] > 20:
self.screen.blit(kaart2, (70, 20))
for event in pg.event.get():
if event.type == pg.MOUSEBUTTONDOWN:
if 100 + 50 > mouse[0] > 100 and 100 + 50 > mouse[1] > 100:
game.main_menu()
def draw(self):
# Game loop - draw
self.draw_grid()
self.all_sprites.draw(self.screen)
if self.paused:
self.screen.blit(pause_image, (0, 0))
self.screen.blit(pauzet_image, (width / 5, width / 8))
self.button(help1_image, help2_image, 600, 450, 150, 50)
mouse = pg.mouse.get_pos()
            if 600 + 150 > mouse[0] > 600 and 450 + 50 > mouse[1] > 450:
self.screen.blit(instbg_image, (0, 0))
self.screen.blit(inst1_image, (100, 0))
pg.display.update()
"""def options(self):
self.screen.fill(aqua)
TextSurf, TextRect = text_objects("options", pg.font.Font('freesansbold.ttf', 60))
TextRect.center = ((width / 2), (height/ 10))
self.screen.blit(TextSurf, TextRect)
self.button("resolution", width / 24, height / 6, width / 6, height / 6, red,red, pg.font.Font('freesansbold.ttf', 20))
self.button("480p", width / 8 * 2, height / 6, width / 6, height / 6, silver,dark_silver, pg.font.Font('freesansbold.ttf', 20))
self.button("720p", width / 8 * 4, height / 6, width / 6, height / 6, silver,dark_silver, pg.font.Font('freesansbold.ttf', 20))
self.button("1080p", width / 8 * 6, height / 6, width / 6, height / 6, silver,dark_silver, pg.font.Font('freesansbold.ttf', 20))
self.button("sound", width / 24, height
/ 6 * 2.5, width / 6, height
/ 6, red,red, pg.font.Font('freesansbold.ttf', 20))
self.button("off", width / 8 * 2, height
/ 6 * 2.5, width / 6, height
/ 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))
self.button("50%", width / 8 * 4, height
/ 6 * 2.5, width / 6, height
/ 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))
self.button("100%", width / 8 * 6, height
/ 6 * 2.5, width / 6, height
/ 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))
self.button("window/full", width / 8, height
/ 6 * 4, width / 4, height
/ 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))
self.button("main menu", width / 8 * 3, height
/ 6 * 4, width / 4, height
/ 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))
self.button("exit", width / 8 * 5, height
/ 6 * 4, width / 4, height
/ 6, silver,dark_silver, pg.font.Font('freesansbold.ttf', 20))
mouse = pg.mouse.get_pos()
pg.display.update()
for event in pg.event.get():
if event.type == pg.MOUSEBUTTONDOWN:
if (width / 8 * 2) + width / 6 > mouse[0] > (width / 8 * 2) and height\
/ 6 + height\
/ 6 > mouse[1] > height\
/ 6:
pg.transform.scale(self.screen, (640, 480))
pg.display.set_mode((640, 480))
height = 480
width = 640
if (width / 8 * 4) + width / 6 > mouse[0] > (width / 8 * 4) and height / 6 + height / 6 > mouse[1] > height / 6:
pg.display.set_mode((1280, 720))
pg.transform.scale(self.screen, (1280, 720))
width = 1280
height = 720
if (width / 8 * 6) + width / 6 > mouse[0] > (width / 8 * 6) and height / 6 + height\
/ 6 > mouse[1] > height\
/ 6:
pg.display.set_mode((1920, 1080))
pg.transform.scale(self.screen, (1920, 1080))
width = 1920
height\
= 1080
if (width / 8 * 2) + width / 6 > mouse[0] > (width / 8 * 2) and height\
/ 6 * 2.5 + height\
/ 6 > mouse[1] > height\
/ 6 * 2.5:
                    pass # Sound options will be added later
if (width / 8 * 4) + width / 6 > mouse[0] > (width / 8 * 4) and height\
/ 6 * 2.5 + height\
/ 6 > mouse[1] > height\
/ 6 * 2.5:
                    pass # Sound options will be added later
if (width / 8 * 6) + width / 6 > mouse[0] > (width / 8 * 6) and height\
/ 6 * 2.5 + height\
/ 6 > mouse[1] > height\
/ 6 * 2.5:
                    pass # Sound options will be added later
if (width / 8) + width / 4 > mouse[0] > (width / 8) and height\
/ 6 * 4 + height\
/ 6 > mouse[1] > height\
/ 6 * 4:
pg.display.set_mode(FULLSCREEN)
if (width / 8 * 3) + width / 4 > mouse[0] > (width / 8 * 3) and height\
/ 6 * 4 + height\
/ 6 > mouse[1] > height\
/ 6 * 4:
self.main_menu()
if (width / 8 * 5) + width / 4 > mouse[0] > (width / 8 * 5) and height\
/ 6 * 4 + height\
/ 6 > mouse[1] > height\
/ 6 * 4:
pg.quit()"""
def events(self):
# Game loop - events
for event in pg.event.get():
if event.type == pg.QUIT:
self.quit()
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
self.quit()
if event.key == pg.K_p:
self.paused = not self.paused
                # ship movement
if event.key == pg.K_LEFT:
self.ship.move(dx=-1)
if event.key == pg.K_RIGHT:
self.ship.move(dx=1)
if event.key == pg.K_DOWN:
self.ship.move(dy=1)
if event.key == pg.K_UP:
self.ship.move(dy=-1)
def main_menu(self):
# while in mainmenu
in_main_menu = True
while in_main_menu:
self.screen.blit(bg_image, (0, 0))
self.screen.blit(title_image, (width/5, width/8))
self.button(start_image1, start_image2, 600, 400, 150, 50)
self.button(score1_image, score2_image, 600, 500, 150, 50)
self.button(help1_image, help2_image, 600, 450, 150, 50)
mouse = pg.mouse.get_pos()
pg.display.update()
for event in pg.event.get():
if event.type == MOUSEBUTTONDOWN:
                    if 600 + 150 > mouse[0] > 600 and 400 + 50 > mouse[1] > 400:
                        game.new()
                    if 600 + 150 > mouse[0] > 600 and 500 + 50 > mouse[1] > 500:
                        pass # options
                    if 600 + 150 > mouse[0] > 600 and 450 + 50 > mouse[1] > 450:
game.instructions()
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
game.quit()
def show_go_screen(self):
# game over/continue
pass
def Score(self):
pass
game = Game()
game.main_menu()
while game.running:
# game.new()
game.main_menu()
pg.quit()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
import hmac
import platform
from urllib.parse import urlparse, urlunparse, quote
import requests
import time
MAFREEBOX_API_URL = 'http://mafreebox.freebox.fr/api/v3/'
CONFIG_FILE = '.fbxconfig'
APP_ID = 't411-tracker-update'
APP_VERSION = '0.0.1'
OLD_TRACKER_HOSTS = (
't411.download:56969',
'tracker.t411.me:56969',
'tracker.t411.me:8880',
'tracker.t411.io:56969',
'tracker.t411.io:8880',
'46.246.117.194:56969',
)
NEW_TRACKER_HOST = 't411.download'
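# The functions below walk the Freebox download list over the Freebox OS v3 API and
# rewrite torrent announce URLs that still point at the old t411 hosts above so that
# they use NEW_TRACKER_HOST instead.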
def get_api_result(rep):
if rep.status_code != 200:
print("http request failed %d / %s" % (rep.status_code, rep.content))
exit(1)
try:
res = rep.json()
except ValueError as e:
print("failed to parse response: %s / %s" % (rep.content, e))
exit(1)
if 'success' not in res or not res['success']:
print("failed to parse response")
exit(1)
if 'result' in res:
return res['result']
return None
def request_token():
payload = {
'app_id': APP_ID,
'app_name': 'T411 tracker updater',
'app_version': APP_VERSION,
'device_name': platform.node(),
}
rep = requests.post(MAFREEBOX_API_URL + "login/authorize/", json=payload)
result = get_api_result(rep)
if 'app_token' not in result or 'track_id' not in result:
print("Malformed response %s" % rep.content)
exit(1)
app_token = result['app_token']
track_id = result['track_id']
print("Please press the button on the freebox front panel to grand access to freebox config ...")
while True:
time.sleep(2)
print(" ... checking auth status ...")
rep = requests.get(MAFREEBOX_API_URL + "login/authorize/%d" % track_id)
result = get_api_result(rep)
print("result: %s" % result)
if 'status' not in result:
print("unable to get auth status %s" % result)
exit(1)
status = result['status']
if status == 'pending':
continue
if status == 'timeout':
print("... too late. you need to press the button on the freebox front panel !!!")
exit(1)
if status == 'granted':
print("... OK got app_token %s" % app_token)
return app_token
print("unexpected status %s" % status)
exit(1)
def get_freebox_token():
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
app_token = None
if 'freebox' in config and 'app_token' in config['freebox']:
app_token = config['freebox']['app_token']
if not app_token:
print("need to request a token")
app_token = request_token()
# save in conf
config['freebox'] = {}
config['freebox']['app_token'] = app_token
with open(CONFIG_FILE, 'w') as configfile:
config.write(configfile)
return app_token
def get_challenge():
rep = requests.get(MAFREEBOX_API_URL + "login/")
res = get_api_result(rep)
if 'challenge' not in res:
print("failed to get challenge %s" % res)
exit(1)
return res['challenge']
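# Freebox login flow, as implemented below: fetch a challenge from login/, derive the
# session password as HMAC-SHA1(app_token, challenge), POST it with the app_id to
# login/session/ and reuse the returned session token in the X-Fbx-App-Auth header.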
def open_session(app_token):
challenge = get_challenge()
password = hmac.new(app_token.encode('utf-8'), msg=challenge.encode('utf-8'), digestmod='sha1').hexdigest()
rep = requests.post(MAFREEBOX_API_URL + "login/session/", json={
'password': password,
'app_id': APP_ID,
        'app_version': APP_VERSION,
})
res = get_api_result(rep)
if 'session_token' not in res:
print("failed to get session token %s" % res)
exit(1)
return res['session_token']
def get_downloads(session_token):
rep = requests.get(MAFREEBOX_API_URL + "downloads/", headers={
'X-Fbx-App-Auth': session_token
})
res = get_api_result(rep)
return res
def get_download_trackers(session_token, download):
rep = requests.get(MAFREEBOX_API_URL + "downloads/%d/trackers" % download['id'], headers={
'X-Fbx-App-Auth': session_token
})
res = get_api_result(rep)
return res
def tracker_need_update(announce_url):
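    # e.g. 'http://tracker.t411.me:56969/announce' needs an update (its netloc is in
    # OLD_TRACKER_HOSTS), while 'http://t411.download/announce' is left untouched.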
parts = urlparse(announce_url)
if parts.netloc in OLD_TRACKER_HOSTS:
return True
return False
def update_tracker(session_token, download_id, tracker):
announce_url = tracker['announce']
parts = list(urlparse(announce_url))
parts[1] = NEW_TRACKER_HOST
new_announce = urlunparse(parts)
print("> UPDATE tracker %s ==> %s" % (announce_url, new_announce))
# add new tracker
url = MAFREEBOX_API_URL + ("downloads/%d/trackers" % download_id)
rep = requests.post(url, json={
'announce': new_announce,
'is_enabled': True
}, headers={
'X-Fbx-App-Auth': session_token
})
get_api_result(rep)
# remove prev tracker
url = MAFREEBOX_API_URL + ("downloads/%d/trackers/%s" % (download_id, quote(announce_url, safe='')))
rep = requests.delete(url, headers={
'X-Fbx-App-Auth': session_token
})
get_api_result(rep)
    # activate the new tracker
    url = MAFREEBOX_API_URL + ("downloads/%d/trackers/%s" % (download_id, quote(new_announce, safe='')))
    rep = requests.put(url, json={
'is_enabled': True
}, headers={
'X-Fbx-App-Auth': session_token
})
get_api_result(rep)
def update_trackers():
print("Getting app token")
app_token = get_freebox_token()
print("App token: %s" % app_token)
print("Opening session ...")
session_token = open_session(app_token)
print("got session token: %s" % session_token)
print("getting download list")
for d in get_downloads(session_token):
if 'type' not in d or d['type'] != 'bt':
print("> skip %s (not a torrent)" % d['name'])
continue
print("> processing torrent %s" % d['name'])
for t in get_download_trackers(session_token, d):
announce_url = t['announce']
if tracker_need_update(announce_url):
update_tracker(session_token, d['id'], t)
else:
print("> KEEP tracker %s" % announce_url)
if __name__ == '__main__':
update_trackers()
|
|
# -*- coding: utf-8 -*-
"""
This module contains the definition of Stream class.
"""
###############################################################################
from __future__ import division
from collections import Iterable, Sized, deque
from heapq import nlargest, nsmallest, heappush, heappop
from itertools import chain, islice, repeat
from operator import add, truediv
from re import compile as regex_compile
from six import iteritems, advance_iterator
# noinspection PyUnresolvedReferences
from six.moves import filter as ifilter, map as imap, reduce as reduce_func, \
xrange as xxrange
from .iterators import seed, distinct, peek, accumulate, partly_distinct
from .poolofpools import PoolOfPools
from .utils import MaxHeapItem, filter_true, filter_false, value_mapper, \
key_mapper, filter_keys, filter_values, make_list, int_or_none, \
float_or_none, long_or_none, decimal_or_none, unicode_or_none
###############################################################################
class Stream(Iterable, Sized):
"""
Stream class provides you with the basic functionality of Streams. Please
    check out the member documentation for examples.
"""
WORKERS = PoolOfPools()
SENTINEL = object()
ALL = object()
@classmethod
def concat(cls, *streams):
"""
Lazily concatenates several stream into one. The same as `Java 8
concat <http://docs.oracle.com/javase/8/docs/api/java/util/stream/
Stream.html#concat-java.util.stream.Stream-
java.util.stream.Stream->`_.
:param streams: The :py:class:`Stream` instances you want to
concatenate.
:return: new processed :py:class:`Stream` instance.
>>> stream1 = Stream(range(2))
>>> stream2 = Stream(["2", "3", "4"])
>>> stream3 = Stream([list(), dict()])
>>> concatenated_stream = Stream.concat(stream1, stream2, stream3)
>>> list(concatenated_stream)
... [0, 1, "2", "3", "4", [], {}]
"""
return cls(streams).chain()
@classmethod
def iterate(cls, function, seed_value):
"""
Returns seed stream. The same as for `Java 8 iterate
<http://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html
#iterate-T-java.util.function.UnaryOperator->`_.
Returns an infinite sequential ordered Stream produced by iterative
application of a function ``f`` to an initial element seed, producing a
Stream consisting of ``seed``, ``f(seed)``, ``f(f(seed))``, etc.
The first element (position 0) in the Stream will be the provided
        seed. For ``n > 0``, the element at position ``n`` will be the result of
        applying the function ``f`` to the element at position ``n - 1``.
:param function function: The function to apply to the seed.
:param object seed_value: The seed value of the function.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.iterate(lambda value: value ** 2, 2)
>>> iterator = iter(stream)
>>> next(iterator)
... 2
>>> next(iterator)
... 4
>>> next(iterator)
        ... 16
"""
return cls(seed(function, seed_value))
@classmethod
def range(cls, *args, **kwargs):
"""
        Creates a numerical iterator. Absolutely the same as ``Stream.range(10)``
and ``Stream(range(10))`` (in Python 2: ``Stream(xrange(10))``). All
arguments go to :py:func:`range` (:py:func:`xrange`) directly.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(6)
>>> list(stream)
        ... [0, 1, 2, 3, 4, 5]
>>> stream = Stream.range(1, 6)
>>> list(stream)
... [1, 2, 3, 4, 5]
>>> stream = Stream.range(1, 6, 2)
>>> list(stream)
... [1, 3, 5]
"""
return cls(xxrange(*args, **kwargs))
def __init__(self, iterator, max_cache=0):
"""
Initializes the :py:class:`Stream`.
Actually it does some smart handling of iterator. If you give it an
instance of :py:class:`dict` or its derivatives (such as
        :py:class:`collections.OrderedDict`), it will iterate through its
items (key and values). Otherwise just normal iterator would be used.
:param Iterable iterator: Iterator which has to be converted into
:py:class:`Stream`.
        :param int max_cache: the number of items to cache (defaults to ``0``,
            i.e. no caching).
"""
self._max_cache = max_cache
if max_cache == 0:
self._cache = None
else:
max_cache = None if max_cache is self.ALL else max_cache
self._cache = deque(maxlen=max_cache)
if isinstance(iterator, dict):
self.iterator = iteritems(iterator)
else:
self.iterator = iter(iterator)
# noinspection PyTypeChecker
def __len__(self):
"""
To support :py:func:`len` function if given iterator supports it.
"""
return len(self.iterator)
def __iter__(self):
"""
To support iteration protocol.
"""
cache = self._cache
iterator = self.iterator
if cache is None:
for item in iterator:
yield item
else:
for item in cache:
yield item
for item in iterator:
cache.append(item)
yield item
def __reversed__(self):
"""
To support :py:func:`reversed` iterator.
"""
return self.reversed()
@property
def first(self):
"""
        Returns the first element from the iterator and does not change internals.
>>> stream = Stream.range(10)
>>> stream.first
... 0
>>> stream.first
... 0
>>> list(stream)
... [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
first_element = advance_iterator(self.iterator)
self.iterator = chain([first_element], self.iterator)
return first_element
def cache(self, max_cache=ALL):
"""Return a stream which caches elements for future iteration.
By default the new stream will cache all elements. If passing an
integer to ``max_cache``, the new stream will cache up to that many of
the most recently iterated elements.
:param int max_cache: the number of items to cache (defaults to
``Stream.ALL``).
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(10).cache()
>>> list(stream)
... [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(stream)
... [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> stream = stream.cache(5)
>>> list(stream)
... [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(stream)
... [5, 6, 7, 8, 9]
"""
return self.__class__(self, max_cache=max_cache)
def _filter(self, condition, predicate, **concurrency_kwargs):
"""
Does parallel filtering on given ``condition`` with given
        ``predicate``. Supports parallel execution.
Internal method you do not want to use generally.
"""
mapper = self.WORKERS.get(concurrency_kwargs)
if mapper:
iterator = ((predicate, item) for item in self)
filtered = mapper(condition, iterator)
filtered = (result for suitable, result in filtered if suitable)
else:
filtered = ifilter(predicate, self)
return self.__class__(filtered)
def filter(self, predicate, **concurrency_kwargs):
"""
Does filtering according to the given ``predicate`` function. Also it
        supports parallelization (useful if the predicate is a heavy function).
You may consider it as equivalent of :py:func:`itertools.ifilter` but
for stream with a possibility to parallelize this process.
:param function predicate: Predicate for filtering elements of the
:py:class:`Stream`.
:param dict concurrency_kwargs: The same concurrency keywords as for
:py:meth:`Stream.map`.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(5)
>>> stream = stream.filter(lambda item: item % 2 == 0)
>>> list(stream)
... [0, 2, 4]
"""
return self._filter(filter_true, predicate, **concurrency_kwargs)
def exclude(self, predicate, **concurrency_kwargs):
"""
Excludes items from :py:class:`Stream` according to the predicate.
You can consider behaviour as the same as for
:py:func:`itertools.ifilterfalse`.
As :py:meth:`Stream.filter` it also supports parallelization. Please
        check out the :py:meth:`Stream.map` keyword arguments.
:param function predicate: Predicate for filtering elements of the
:py:class:`Stream`.
:param dict concurrency_kwargs: The same concurrency keywords as for
:py:meth:`Stream.map`.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(6)
>>> stream = stream.exclude(lambda item: item % 2 == 0)
>>> list(stream)
... [1, 3, 5]
"""
return self._filter(filter_false, predicate, **concurrency_kwargs)
def regexp(self, regexp, flags=0):
"""
Filters stream according to the regular expression using
:py:func:`re.match`. It also supports the same flags as
:py:func:`re.match`.
:param str regexp: Regular expression for filtering.
:param int flags: Flags from :py:mod:`re`.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(100)
>>> stream = stream.strings()
>>> stream = stream.regexp(r"^1")
>>> list(stream)
... ['1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19']
"""
regexp = regex_compile(regexp, flags)
return self.filter(regexp.match)
def divisible_by(self, number):
"""
Filters stream for the numbers divisible by the given one.
:param int number: Number which every element should be divisible by.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(6)
>>> stream = stream.divisible_by(2)
>>> list(stream)
... [0, 2, 4]
"""
return self.filter(lambda item: item % number == 0)
def evens(self):
"""
Filters and keeps only even numbers from the stream.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(6)
>>> stream = stream.evens()
>>> list(stream)
... [0, 2, 4]
"""
return self.divisible_by(2)
def odds(self):
"""
Filters and keeps only odd numbers from the stream.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(6)
>>> stream = stream.odds()
>>> list(stream)
... [1, 3, 5]
"""
return self.filter(lambda item: item % 2 != 0)
def instances_of(self, cls):
"""
Filters and keeps only instances of the given class.
:param class cls: Class for filtering.
:return: new processed :py:class:`Stream` instance.
>>> int_stream = Stream.range(4)
>>> str_stream = Stream.range(4).strings()
>>> result_stream = Stream.concat(int_stream, str_stream)
>>> result_stream = result_stream.instances_of(str)
>>> list(result_stream)
... ['0', '1', '2', '3']
"""
return self.filter(lambda item: isinstance(item, cls))
def exclude_nones(self):
"""
Excludes ``None`` from the stream.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream([1, 2, None, 3, None, 4])
>>> stream = stream.exclude_nones()
>>> list(stream)
... [1, 2, 3, 4]
"""
return self.filter(lambda item: item is not None)
def only_nones(self):
"""
Keeps only ``None`` in the stream (for example, for counting).
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream([1, 2, None, 3, None, 4])
>>> stream = stream.only_nones()
>>> list(stream)
... [None, None]
"""
return self.filter(lambda item: item is None)
def only_trues(self):
"""
Keeps only those elements where ``bool(element) == True``.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream([1, 2, None, 0, {}, [], 3])
>>> stream = stream.only_trues()
>>> list(stream)
... [1, 2, 3]
"""
return self.filter(bool)
def only_falses(self):
"""
Keeps only those elements where ``bool(item) == False``.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream([1, 2, None, 0, {}, [], 3])
        >>> stream = stream.only_falses()
>>> list(stream)
... [None, 0, {}, []]
Opposite to :py:meth:`Stream.only_trues`.
"""
return self.filter(lambda item: not bool(item))
def ints(self):
"""
Tries to convert everything to :py:func:`int` and keeps only
successful attempts.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream([1, 2, "3", "4", None, {}, 5])
>>> stream = stream.ints()
>>> list(stream)
... [1, 2, 3, 4, 5]
.. note::
It is not the same as ``stream.map(int)`` because it removes failed
attempts.
"""
return self.map(int_or_none).exclude_nones()
def floats(self):
"""
Tries to convert everything to :py:func:`float` and keeps only
successful attempts.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream([1, 2, "3", "4", None, {}, 5])
>>> stream = stream.floats()
>>> list(stream)
... [1.0, 2.0, 3.0, 4.0, 5.0]
.. note::
It is not the same as ``stream.map(float)`` because it removes
failed attempts.
"""
return self.map(float_or_none).exclude_nones()
def longs(self):
"""
Tries to convert everything to :py:func:`long` and keeps only
successful attempts.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream([1, 2, "3", "4", None, {}, 5])
>>> stream = stream.longs()
>>> list(stream)
... [1L, 2L, 3L, 4L, 5L]
.. note::
It is not the same as ``stream.map(long)`` because it removes
failed attempts.
"""
return self.map(long_or_none).exclude_nones()
def decimals(self):
"""
Tries to convert everything to :py:class:`decimal.Decimal` and keeps
only successful attempts.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream([1, 2.0, "3", "4.0", None, {}])
        >>> stream = stream.decimals()
>>> list(stream)
... [Decimal('1'), Decimal('2'), Decimal('3'), Decimal('4.0')]
.. note::
It is not the same as ``stream.map(Decimal)`` because it removes
failed attempts.
.. note::
It tries to use ``cdecimal`` module if possible.
"""
return self.map(decimal_or_none).exclude_nones()
def strings(self):
"""
Tries to convert everything to :py:func:`unicode` (:py:class:`str`
for Python 3) and keeps only successful attempts.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream([1, 2.0, "3", "4.0", None, {}])
>>> stream = stream.strings()
>>> list(stream)
... ['1', '2.0', '3', '4.0', 'None', '{}']
.. note::
It is not the same as ``stream.map(str)`` because it removes
failed attempts.
.. note::
It tries to convert to :py:class:`unicode` if possible,
not :py:class:`bytes`.
"""
return self.map(unicode_or_none).exclude_nones()
def tuplify(self, clones=2):
"""
Tuplifies iterator. Creates a tuple from iterable with ``clones``
elements.
:param int clones: The count of elements in result tuple.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(2)
>>> stream = stream.tuplify(3)
>>> list(stream)
... [(0, 0, 0), (1, 1, 1)]
"""
return self.__class__(tuple(repeat(item, clones)) for item in self)
def map(self, predicate, **concurrency_kwargs):
"""
        The cornerstone method of :py:class:`Stream`; the other methods build on
        it. It supports parallelization out of the box and works just like
:py:func:`itertools.imap`.
:param function predicate: Predicate to map each element of the
:py:class:`Stream`.
:param dict concurrency_kwargs: The same concurrency keywords.
:return: new processed :py:class:`Stream` instance.
        Parallelization is configurable by keywords. There are two keywords
        supported: ``parallel`` and ``process``. If you set one keyword to
``True`` then :py:class:`Stream` would try to map everything
concurrently. If you want more intelligent tuning just set the number
of workers you want.
For example, you have a list of URLs to fetch
>>> stream = Stream(urls)
You can fetch them in parallel
>>> stream.map(requests.get, parallel=True)
By default, the number of workers is the number of cores on your
computer. But if you want to have 64 workers, you are free to do it
>>> stream.map(requests.get, parallel=64)
The same for ``process`` which will try to use processes.
>>> stream.map(requests.get, process=True)
and
>>> stream.map(requests.get, process=64)
.. note::
Python multiprocessing has its caveats and pitfalls, please use
it carefully (especially ``predicate``). Read the documentation on
:py:mod:`multiprocessing` and try to google best practices.
.. note::
If you set both ``parallel`` and ``process`` keywords only
``parallel`` would be used. If you want to disable some type of
concurrency just set it to ``None``.
>>> stream.map(requests.get, parallel=None, process=64)
is equal to
>>> stream.map(requests.get, process=64)
The same for ``parallel``
>>> stream.map(requests.get, parallel=True, process=None)
is equal to
>>> stream.map(requests.get, parallel=True)
.. note::
By default no concurrency is used.
"""
mapper = self.WORKERS.get(concurrency_kwargs)
if not mapper:
mapper = imap
return self.__class__(mapper(predicate, self))
def _kv_map(self, mapper, predicate, **concurrency_kwargs):
"""
Internal method for :py:meth:`Stream.value_map` and
:py:meth:`Stream.key_map`. Do not use it outside.
"""
iterator = ((predicate, item) for item in self)
stream = self.__class__(iterator)
return stream.map(mapper, **concurrency_kwargs)
def value_map(self, predicate, **concurrency_kwargs):
"""
        Maps only the value in a (key, value) pair. If an element is not a pair,
        it is passed through :py:meth:`Stream.tuplify` first.
:param function predicate: Predicate to apply to the value of element
in the :py:class:`Stream`.
:param dict concurrency_kwargs: The same concurrency keywords as for
:py:meth:`Stream.map`.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(4)
>>> stream = stream.tuplify()
>>> stream = stream.value_map(lambda item: item ** 3)
>>> list(stream)
... [(0, 0), (1, 1), (2, 8), (3, 27)]
>>> stream = Stream.range(4)
>>> stream = stream.value_map(lambda item: item ** 3)
>>> list(stream)
... [(0, 0), (1, 1), (2, 8), (3, 27)]
"""
return self._kv_map(value_mapper, predicate, **concurrency_kwargs)
def key_map(self, predicate, **concurrency_kwargs):
"""
        Maps only the key in a (key, value) pair. If an element is not a pair,
        it is passed through :py:meth:`Stream.tuplify` first.
:param function predicate: Predicate to apply to the key of element in
the :py:class:`Stream`.
:param dict concurrency_kwargs: The same concurrency keywords as for
:py:meth:`Stream.map`.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(4)
>>> stream = stream.tuplify()
>>> stream = stream.key_map(lambda item: item ** 3)
>>> list(stream)
... [(0, 0), (1, 1), (8, 2), (27, 3)]
>>> stream = Stream.range(4)
>>> stream = stream.key_map(lambda item: item ** 3)
>>> list(stream)
... [(0, 0), (1, 1), (8, 2), (27, 3)]
"""
return self._kv_map(key_mapper, predicate, **concurrency_kwargs)
def distinct(self):
"""
Removes duplicates from the stream.
:return: new processed :py:class:`Stream` instance.
.. note::
All objects in the stream have to be hashable (support
:py:meth:`__hash__`).
.. note::
Please use it carefully. It returns new :py:class:`Stream` but will
keep every element in your memory.
"""
return self.__class__(distinct(self))
def partly_distinct(self):
"""
Excludes some duplicates from the memory.
:return: new processed :py:class:`Stream` instance.
.. note::
All objects in the stream have to be hashable (support
:py:meth:`__hash__`).
.. note::
            It doesn't guarantee that all duplicates will be removed, especially
            if your stream is pretty big and its cardinality is huge.
"""
return self.__class__(partly_distinct(self))
def sorted(self, key=None, reverse=False):
"""
Sorts the stream elements.
:param function key: Key function for sorting
:param bool reverse: Do we need to sort in descending order?
:return: new processed :py:class:`Stream` instance.
        .. note::
            No magic here: all elements need to be fetched into memory for
            sorting.
"""
return self.__class__(sorted(self, reverse=reverse, key=key))
def reversed(self):
"""
Reverses the stream.
:return: new processed :py:class:`Stream` instance.
        .. note::
            If the underlying iterator doesn't support reversing, we are in
            trouble and need to fetch everything into memory.
"""
try:
iterator = reversed(self.iterator)
except TypeError:
iterator = reversed(list(self.iterator))
return self.__class__(iterator)
def peek(self, predicate):
"""
Does the same as `Java 8 peek <http://docs.oracle.com/javase/8/docs/
api/java/util/stream/Stream.html#peek-java.util.function.Consumer->`_.
:param function predicate: Predicate to apply on each element.
:return: new processed :py:class:`Stream` instance.
Returns a stream consisting of the elements of this stream,
additionally performing the provided action on each element as
elements are consumed from the resulting stream.
"""
return self.__class__(peek(self, predicate))
def limit(self, size):
"""
Limits stream to given ``size``.
:param int size: The size of new :py:class:`Stream`.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(1000)
>>> stream = stream.limit(5)
>>> list(stream)
... [0, 1, 2, 3, 4]
"""
return self.__class__(islice(self, size))
def skip(self, size):
"""
Skips first ``size`` elements.
:param int size: The amount of elements to skip.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(10)
>>> stream = stream.skip(5)
>>> list(stream)
... [5, 6, 7, 8, 9]
"""
return self.__class__(islice(self, size, None))
def keys(self):
"""
Iterates only keys from the stream (first element from the
        :py:class:`tuple`). If an element is not a tuple, the element itself is used.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(5)
>>> stream = stream.key_map(lambda item: item ** 3)
>>> stream = stream.keys()
>>> list(stream)
... [0, 1, 8, 27, 64]
"""
return self.map(filter_keys)
def values(self):
"""
Iterates only values from the stream (last element from the
        :py:class:`tuple`). If an element is not a tuple, the element itself is used.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(5)
>>> stream = stream.key_map(lambda item: item ** 3)
>>> stream = stream.values()
>>> list(stream)
... [0, 1, 2, 3, 4]
"""
return self.map(filter_values)
def chain(self):
"""
        If elements of the stream are iterable, tries to flatten the stream.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(3)
>>> stream = stream.tuplify()
>>> stream = stream.chain()
>>> list(stream)
        ... [0, 0, 1, 1, 2, 2]
"""
return self.__class__(chain.from_iterable(self))
def largest(self, size):
"""
Returns ``size`` largest elements from the stream.
:return: new processed :py:class:`Stream` instance.
>>> stream = Stream.range(3000)
        >>> stream = stream.largest(5)
        >>> list(stream)
        ... [2999, 2998, 2997, 2996, 2995]
"""
return self.__class__(nlargest(size, self))
def smallest(self, size):
"""
        Returns ``size`` smallest elements from the stream.
        :return: new processed :py:class:`Stream` instance.
        >>> stream = Stream.range(3000)
        >>> stream = stream.smallest(5)
        >>> list(stream)
        ... [0, 1, 2, 3, 4]
"""
return self.__class__(nsmallest(size, self))
def reduce(self, function, initial=SENTINEL):
"""
Applies :py:func:`reduce` for the iterator
:param function function: Reduce function
        :param object initial: Initial value (if not set, the first element
            is used).
        >>> stream = Stream.range(5)
>>> stream.reduce(operator.add)
... 10
"""
iterator = iter(self)
if initial is self.SENTINEL:
initial = advance_iterator(iterator)
return reduce_func(function, iterator, initial)
def sum(self):
"""
Returns the sum of elements in the stream.
        >>> stream = Stream.range(10)
        >>> stream = stream.decimals()
        >>> stream.sum()
... Decimal('45')
.. note::
Do not use :py:func:`sum` here. It does sum regarding to defined
:py:meth:`__add__` of the classes. So it can sum
:py:class:`decimal.Decimal` with :py:class:`int` for example.
"""
iterator = accumulate(self, add)
last = advance_iterator(iterator)
for item in iterator:
last = item
return last
def count(self, element=SENTINEL):
"""
Returns the number of elements in the stream. If ``element`` is set,
returns the count of particular element in the stream.
:param object element: The element we need to count in the stream
        :return: The number of elements, or the count of the particular element.
"""
if element is not self.SENTINEL:
return sum((1 for item in self if item is element))
if hasattr(self.iterator, "__len__"):
# noinspection PyTypeChecker
return len(self.iterator)
return sum((1 for _ in self))
def average(self):
"""
Calculates the average of elements in the stream.
:return: The average of elements.
>>> stream = Stream.range(10000)
>>> stream.average()
... 4999.5
"""
counter = 1
iterator = iter(self)
total = advance_iterator(iterator)
for item in iterator:
total = add(total, item)
counter += 1
return truediv(total, counter)
def nth(self, nth_element):
"""
Returns Nth element from the stream.
:param int nth_element: Number of element to return.
:return: Nth element.
        >>> stream = Stream.range(10000)
        >>> stream.nth(2)
        ... 1
.. note::
            Please note that all elements of the stream will be fetched into
            memory (except in the case where
``nth_element == 1``).
"""
if nth_element == 1:
return min(self)
self.iterator = make_list(self.iterator)
if nth_element <= len(self.iterator):
return max(self.smallest(nth_element))
def median(self):
"""
Returns median value from the stream.
:return: The median of the stream.
>>> stream = Stream.range(10000)
>>> stream.median()
... 5000
.. note::
            Please note that all elements of the stream will be fetched into
            memory.
"""
biggest, smallest = [], []
iterator = iter(self)
first_elements = list(islice(iterator, 2))
if not first_elements:
return None
if len(first_elements) == 1:
return first_elements[0]
first, last = first_elements
if first > last:
first, last = last, first
smallest.append(MaxHeapItem(first))
biggest.append(last)
for item in iterator:
if item < smallest[0].value:
heappush(smallest, MaxHeapItem(item))
else:
heappush(biggest, item)
if len(smallest) > len(biggest) + 1:
heappush(biggest, heappop(smallest).value)
elif len(biggest) > len(smallest) + 1:
heappush(smallest, MaxHeapItem(heappop(biggest)))
biggest_item = max(biggest, smallest, key=len)[0]
if isinstance(biggest_item, MaxHeapItem):
return biggest_item.value
return biggest_item
def any(self, predicate=bool, **concurrency_kwargs):
"""
Check if any element matching given ``predicate`` exists in the stream.
If ``predicate`` is not defined, :py:func:`bool` is used.
:param function predicate: Predicate to apply to each element of the
:py:class:`Stream`.
:param dict concurrency_kwargs: The same concurrency keywords as for
:py:meth:`Stream.map`.
        :return: Whether there is at least one matching element.
>>> stream = Stream.range(5)
>>> stream.any(lambda item: item < 100)
... True
"""
if predicate is None:
iterator = iter(self)
else:
iterator = self.map(predicate, **concurrency_kwargs)
return any(iterator)
def all(self, predicate=bool, **concurrency_kwargs):
"""
Check if all elements matching given ``predicate`` exist in the stream.
If ``predicate`` is not defined, :py:func:`bool` is used.
:param function predicate: Predicate to apply to each element of the
:py:class:`Stream`.
:param dict concurrency_kwargs: The same concurrency keywords as for
:py:meth:`Stream.map`.
        :return: Whether all elements match the predicate.
>>> stream = Stream.range(5)
>>> stream.all(lambda item: item > 100)
... False
"""
if predicate is None:
iterator = iter(self)
else:
iterator = self.map(predicate, **concurrency_kwargs)
return all(iterator)
|
|
import datetime
import logging
import pdb
import signals
from django import forms
from django.db.models import Q
from django.forms.extras.widgets import SelectDateWidget
from django.utils.translation import ugettext_lazy as _, ugettext
from l10n.models import Country
from livesettings import config_value
from satchmo_store.contact.models import Contact, AddressBook, PhoneNumber, Organization, ContactRole
from satchmo_store.shop.models import Config
from satchmo_store.shop.utils import clean_field
from signals_ahoy.signals import form_init, form_initialdata, form_postsave
log = logging.getLogger('satchmo_store.contact.forms')
selection = ''
def area_choices_for_country(country, translator=_):
choices = [('', translator("Not Applicable"))]
if country:
areas = country.adminarea_set.filter(active=True)
if areas.count() > 0:
choices = [('', translator("---Please Select---"))]
choices.extend([(area.abbrev or area.name, area.name) for area in areas])
return choices
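# A minimal sketch of what ``area_choices_for_country`` returns, using
# hypothetical stand-ins for an l10n ``Country`` and its admin areas instead
# of real model instances (only the attributes the function touches are
# faked). A country with active admin areas yields a "---Please Select---"
# placeholder followed by (abbrev, name) pairs; a country without areas, or
# ``None``, yields only the "Not Applicable" choice.
def _area_choices_sketch():
    class _FakeArea(object):
        def __init__(self, abbrev, name):
            self.abbrev, self.name = abbrev, name
    class _FakeAreaSet(object):
        def __init__(self, areas):
            self._areas = areas
        def filter(self, **kwargs):
            return self
        def count(self):
            return len(self._areas)
        def __iter__(self):
            return iter(self._areas)
    class _FakeCountry(object):
        def __init__(self, areas):
            self.adminarea_set = _FakeAreaSet(areas)
    country = _FakeCountry([_FakeArea('OR', 'Oregon'), _FakeArea('WA', 'Washington')])
    # -> [('', '---Please Select---'), ('OR', 'Oregon'), ('WA', 'Washington')]
    return area_choices_for_country(country, translator=lambda s: s)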
class ProxyContactForm(forms.Form):
def __init__(self, *args, **kwargs):
self._contact = kwargs.pop('contact', None)
super(ProxyContactForm, self).__init__(*args, **kwargs)
class ContactInfoForm(ProxyContactForm):
email = forms.EmailField(max_length=75, label=_('Email'), required=False)
title = forms.CharField(max_length=30, label=_('Title'), required=False)
first_name = forms.CharField(max_length=30, label=_('First Name'), required=False)
last_name = forms.CharField(max_length=30, label=_('Last Name'), required=False)
phone = forms.CharField(max_length=30, label=_('Phone'), required=False)
addressee = forms.CharField(max_length=61, label=_('Addressee'), required=False)
organization = forms.CharField(max_length=50, label=_('Organization'), required=False)
street1 = forms.CharField(max_length=30, label=_('Street'), required=False)
street2 = forms.CharField(max_length=30, required=False)
city = forms.CharField(max_length=30, label=_('City'), required=False)
state = forms.CharField(max_length=30, label=_('State'), required=False)
postal_code = forms.CharField(max_length=10, label=_('ZIP code/Postcode'), required=False)
copy_address = forms.BooleanField(label=_('Shipping same as billing?'), required=False)
ship_addressee = forms.CharField(max_length=61, label=_('Addressee'), required=False)
ship_street1 = forms.CharField(max_length=30, label=_('Street'), required=False)
ship_street2 = forms.CharField(max_length=30, required=False)
ship_city = forms.CharField(max_length=30, label=_('City'), required=False)
ship_state = forms.CharField(max_length=30, label=_('State'), required=False)
ship_postal_code = forms.CharField(max_length=10, label=_('ZIP code/Postcode'), required=False)
next = forms.CharField(max_length=200, widget=forms.HiddenInput(), required=False)
def __init__(self, *args, **kwargs):
initial = kwargs.get('initial', {})
form_initialdata.send(ContactInfoForm, form=self, initial=initial, contact=kwargs.get('contact', None))
kwargs['initial'] = initial
shop = kwargs.pop('shop', None)
shippable = kwargs.pop('shippable', True)
super(ContactInfoForm, self).__init__(*args, **kwargs)
if not shop:
shop = Config.objects.get_current()
self._shop = shop
self._shippable = shippable
self.required_billing_data = config_value('SHOP', 'REQUIRED_BILLING_DATA')
self.required_shipping_data = config_value('SHOP', 'REQUIRED_SHIPPING_DATA')
self._local_only = shop.in_country_only
self.enforce_state = config_value('SHOP', 'ENFORCE_STATE')
self._default_country = shop.sales_country
billing_country = (self._contact and getattr(self._contact.billing_address, 'country', None)) or self._default_country
shipping_country = (self._contact and getattr(self._contact.shipping_address, 'country', None)) or self._default_country
self.fields['country'] = forms.ModelChoiceField(shop.countries(), required=False, label=_('Country'), empty_label=None, initial=billing_country.pk)
self.fields['ship_country'] = forms.ModelChoiceField(shop.countries(), required=False, label=_('Country'), empty_label=None, initial=shipping_country.pk)
if self.enforce_state:
if self.is_bound and not self._local_only:
# If the user has already chosen the country and submitted,
# populate accordingly.
#
# We don't really care if country fields are empty;
# area_choices_for_country() handles those cases properly.
billing_country_data = clean_field(self, 'country')
shipping_country_data = clean_field(self, 'ship_country')
# Has the user selected a country? If so, use it.
if billing_country_data:
billing_country = billing_country_data
if clean_field(self, "copy_address"):
shipping_country = billing_country
elif shipping_country_data:
shipping_country = shipping_country_data
# Get areas for the initial country selected.
billing_areas = area_choices_for_country(billing_country)
shipping_areas = area_choices_for_country(shipping_country)
billing_state = (self._contact and getattr(self._contact.billing_address, 'state', None)) or selection
self.fields['state'] = forms.ChoiceField(choices=billing_areas,
initial=billing_state,
label=_('State'),
                             # if there are no states, then don't make it required. (first
# choice is always either "--Please Select--", or "Not
# Applicable")
required=len(billing_areas) > 1)
shipping_state = (self._contact and getattr(self._contact.shipping_address, 'state', None)) or selection
self.fields['ship_state'] = forms.ChoiceField(choices=shipping_areas, initial=shipping_state, required=False, label=_('State'))
for fname in self.required_billing_data:
if fname == 'country' and self._local_only:
continue
            # skip 'state' if ENFORCE_STATE is on; if there are no states, we
            # might have made the billing state field not required in the
            # enforce_state block earlier, and we don't want this loop to make
            # it required again.
if fname == 'state' and self.enforce_state:
continue
self.fields[fname].required = True
        # if copy_address is on, turn off django's validation for required fields
if not (self.is_bound and clean_field(self, "copy_address")):
for fname in self.required_shipping_data:
if fname == 'country' and self._local_only:
continue
self.fields['ship_%s' % fname].required = True
# slap a star on the required fields
for f in self.fields:
fld = self.fields[f]
if fld.required:
fld.label = (fld.label or f) + '*'
log.info('Sending form_init signal: %s', ContactInfoForm)
form_init.send(ContactInfoForm, form=self)
def _check_state(self, data, country):
if country and self.enforce_state and country.adminarea_set.filter(active=True).count() > 0:
if not data or data == selection:
raise forms.ValidationError(
self._local_only and _('This field is required.') \
or _('State is required for your country.'))
if (country.adminarea_set
.filter(active=True)
.filter(Q(name__iexact=data) | Q(abbrev__iexact=data))
.count() != 1):
raise forms.ValidationError(_('Invalid state or province.'))
return data
def clean_email(self):
"""Prevent account hijacking by disallowing duplicate emails."""
email = self.cleaned_data.get('email', None)
if self._contact:
if self._contact.email and self._contact.email == email:
return email
users_with_email = Contact.objects.filter(email=email)
if len(users_with_email) == 0:
return email
if len(users_with_email) > 1 or users_with_email[0].id != self._contact.id:
raise forms.ValidationError(
ugettext("That email address is already in use."))
return email
def clean_postal_code(self):
postcode = self.cleaned_data.get('postal_code')
if not postcode and 'postal_code' not in self.required_billing_data:
return postcode
country = None
if self._local_only:
shop_config = Config.objects.get_current()
country = shop_config.sales_country
else:
country = clean_field(self, 'country')
if not country:
# Either the store is misconfigured, or the country was
# not supplied, so the country validation will fail and
# we can defer the postcode validation until that's fixed.
return postcode
return self.validate_postcode_by_country(postcode, country)
def clean_state(self):
data = self.cleaned_data.get('state')
if self._local_only:
country = self._default_country
else:
country = clean_field(self, 'country')
        if country is None:
            raise forms.ValidationError(_('This field is required.'))
self._check_state(data, country)
return data
def clean_addressee(self):
if not self.cleaned_data.get('addressee'):
first_and_last = u' '.join((self.cleaned_data.get('first_name', ''),
self.cleaned_data.get('last_name', '')))
return first_and_last
else:
return self.cleaned_data['addressee']
def clean_ship_addressee(self):
if not self.cleaned_data.get('ship_addressee') and \
not self.cleaned_data.get('copy_address'):
first_and_last = u' '.join((self.cleaned_data.get('first_name', ''),
self.cleaned_data.get('last_name', '')))
return first_and_last
else:
return self.cleaned_data['ship_addressee']
def clean_country(self):
if self._local_only:
return self._default_country
else:
if not self.cleaned_data.get('country'):
log.error("No country! Got '%s'" % self.cleaned_data.get('country'))
raise forms.ValidationError(_('This field is required.'))
return self.cleaned_data['country']
def clean_ship_country(self):
copy_address = clean_field(self, 'copy_address')
if copy_address:
return self.cleaned_data.get('country')
if self._local_only:
return self._default_country
if not self._shippable:
return self.cleaned_data.get('country')
shipcountry = self.cleaned_data.get('ship_country')
if not shipcountry:
raise forms.ValidationError(_('This field is required.'))
if config_value('PAYMENT', 'COUNTRY_MATCH'):
country = self.cleaned_data.get('country')
if shipcountry != country:
raise forms.ValidationError(_('Shipping and Billing countries must match'))
return shipcountry
def ship_charfield_clean(self, field_name):
if self.cleaned_data.get('copy_address'):
self.cleaned_data['ship_' + field_name] = clean_field(self, field_name)
return self.cleaned_data['ship_' + field_name]
else:
val = clean_field(self, 'ship_' + field_name)
# REQUIRED_SHIPPING_DATA doesn't contain 'ship_' prefix
if (not val) and field_name in self.required_shipping_data:
raise forms.ValidationError(_('This field is required.'))
return val
def clean_ship_street1(self):
return self.ship_charfield_clean('street1')
def clean_ship_street2(self):
if self.cleaned_data.get('copy_address'):
if 'street2' in self.cleaned_data:
self.cleaned_data['ship_street2'] = self.cleaned_data.get('street2')
return self.cleaned_data.get('ship_street2')
def clean_ship_city(self):
rc = self.ship_charfield_clean('city')
return rc
def clean_ship_postal_code(self):
        code = self.ship_charfield_clean('postal_code')
        if not self._shippable:
            return code
        if clean_field(self, 'copy_address'):
            # We take it that the country for shipping and billing is the
            # same, so don't bother validating again.
            return code
        country = None
        if self._local_only:
            shop_config = Config.objects.get_current()
            country = shop_config.sales_country
        else:
            country = self.ship_charfield_clean('country')
        if not country:
            # Either the store is misconfigured, or the country was
            # not supplied, so the country validation will fail and
            # we can defer the postcode validation until that's fixed.
            return code
        return self.validate_postcode_by_country(code, country)
def clean_ship_state(self):
data = self.cleaned_data.get('ship_state')
if self.cleaned_data.get('copy_address'):
if 'state' in self.cleaned_data:
self.cleaned_data['ship_state'] = self.cleaned_data['state']
return self.cleaned_data['ship_state']
if self._local_only:
country = self._default_country
else:
country = self.ship_charfield_clean('country')
self._check_state(data, country)
return data
def save(self, **kwargs):
if "contact" in kwargs:
kwargs['contact'] = None
return self.save_info(**kwargs)
def save_info(self, contact=None, **kwargs):
"""
Save the contact info into the database.
Checks to see if contact exists. If not, creates a contact
and copies in the address and phone number.
"""
if not contact:
customer = Contact()
log.debug('creating new contact')
else:
customer = contact
log.debug('Saving contact info for %s', contact)
data = self.cleaned_data.copy()
country = data['country']
if not isinstance(country, Country):
country = Country.objects.get(pk=country)
data['country'] = country
data['country_id'] = country.id
shipcountry = data['ship_country']
if not isinstance(shipcountry, Country):
shipcountry = Country.objects.get(pk=shipcountry)
data['ship_country'] = shipcountry
data['ship_country_id'] = shipcountry.id
organization_name = data.pop('organization', None)
if organization_name:
org = Organization.objects.by_name(organization_name, create=True)
customer.organization = org
else:
# in case customer wants to remove organization name from their profile
customer.organization = None
for field in customer.__dict__.keys():
try:
setattr(customer, field, data[field])
except KeyError:
pass
if not customer.role:
customer.role = ContactRole.objects.get(pk='Customer')
customer.save()
# we need to make sure we don't blindly add new addresses
# this isn't ideal, but until we have a way to manage addresses
# this will force just the two addresses, shipping and billing
# TODO: add address management like Amazon.
bill_address = customer.billing_address
if not bill_address:
bill_address = AddressBook(contact=customer)
changed_location = False
address_keys = bill_address.__dict__.keys()
for field in address_keys:
if (not changed_location) and field in ('state', 'country_id', 'city'):
if getattr(bill_address, field) != data[field]:
changed_location = True
try:
setattr(bill_address, field, data[field])
except KeyError:
pass
bill_address.is_default_billing = True
copy_address = data['copy_address']
ship_address = customer.shipping_address
try:
setattr(ship_address, "addressee", data.get('ship_addressee', ""))
setattr(bill_address, "addressee", data.get('addressee', ""))
except AttributeError:
pass
# If we are copying the address and one isn't in place for shipping
# copy it
if not getattr(ship_address, "addressee", False) and copy_address:
try:
ship_address.addressee = bill_address.addressee
except AttributeError:
pass
# Make sure not to overwrite a custom ship to name
if copy_address and getattr(ship_address, "addressee", "") == getattr(bill_address, "addressee", ""):
# make sure we don't have any other default shipping address
if ship_address and ship_address.id != bill_address.id:
ship_address.delete()
bill_address.is_default_shipping = True
bill_address.save()
# If we have different ship to and bill to names, preserve them
if not copy_address or getattr(ship_address, "addressee", "") != getattr(bill_address, "addressee", ""):
if not ship_address or ship_address.id == bill_address.id:
ship_address = AddressBook()
for field in address_keys:
ship_field = 'ship_' + field
if (not changed_location) and field in ('state', 'country_id', 'city'):
if getattr(ship_address, field) != data[ship_field]:
changed_location = True
try:
setattr(ship_address, field, data[ship_field])
except KeyError:
pass
ship_address.is_default_shipping = True
ship_address.is_default_billing = False
ship_address.contact = customer
ship_address.save()
if not customer.primary_phone:
phone = PhoneNumber()
phone.primary = True
else:
phone = customer.primary_phone
phone.phone = data['phone']
phone.contact = customer
phone.save()
form_postsave.send(ContactInfoForm, object=customer, formdata=data, form=self)
if changed_location:
signals.satchmo_contact_location_changed.send(self, contact=customer)
return customer.id
def validate_postcode_by_country(self, postcode, country):
responses = signals.validate_postcode.send(self, postcode=postcode, country=country)
# allow responders to reformat the code, but if they don't return
# anything, then just use the existing code
for responder, response in responses:
if response:
return response
return postcode
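# A hedged sketch of a ``validate_postcode`` signal receiver. The dispatch in
# ``validate_postcode_by_country`` above passes ``postcode`` and ``country``
# keyword arguments and uses the first truthy return value as the (possibly
# reformatted) postcode; a falsy return leaves the submitted value untouched.
# The US-only rule below is purely illustrative and is not part of the store's
# own validation; the ``iso2_code`` attribute check is likewise an assumption
# about the l10n Country model.
def _example_us_zip_receiver(sender, postcode=None, country=None, **kwargs):
    import re
    if country is None or getattr(country, 'iso2_code', '') != 'US':
        # Not our country: return None so other receivers (or the submitted
        # value) win.
        return None
    if not re.match(r'^\d{5}(-\d{4})?$', postcode or ''):
        raise forms.ValidationError(_('Enter a ZIP code in the form XXXXX or XXXXX-XXXX.'))
    return postcode.strip()
# A receiver like this would be connected at import time, e.g.:
#     signals.validate_postcode.connect(_example_us_zip_receiver)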
class DateTextInput(forms.TextInput):
def render(self, name, value, attrs=None):
if isinstance(value, datetime.date):
value = value.strftime("%m.%d.%Y")
return super(DateTextInput, self).render(name, value, attrs)
class ExtendedContactInfoForm(ContactInfoForm):
"""Contact form which includes birthday and newsletter."""
years_to_display = range(datetime.datetime.now().year - 100,
datetime.datetime.now().year + 1)
dob = forms.DateField(widget=SelectDateWidget(years=years_to_display),
required=False)
newsletter = forms.BooleanField(label=_('Newsletter'),
widget=forms.CheckboxInput(),
required=False)
class AddressBookForm(forms.Form):
addressee_name = forms.CharField(max_length=61, label=_('Addressee Name'), required=True)
description = forms.CharField(max_length=20,
label=_('Description'),
required=False)
street1 = forms.CharField(max_length=30, label=_('Street'), required=True)
street2 = forms.CharField(max_length=30, required=False)
city = forms.CharField(max_length=30, label=_('City'), required=True)
state = forms.CharField(max_length=30, label=_('State'), required=True)
postal_code = forms.CharField(max_length=10, label=_('ZIP code/Postcode'), required=True)
def __init__(self, *args, **kwargs):
shop = kwargs.pop('shop', None)
super(AddressBookForm, self).__init__(*args, **kwargs)
if not shop:
shop = Config.objects.get_current()
self._default_country = shop.sales_country
shipping_areas = area_choices_for_country(self._default_country)
self.fields['country'] = forms.ModelChoiceField(shop.countries(), required=False, label=_('Country'), empty_label=None, initial=shop.sales_country.pk)
self.fields['state'] = forms.ChoiceField(choices=shipping_areas, required=False, label=_('State'))
def save(self, contact, address_entry=None, **kwargs):
data = self.cleaned_data.copy()
if not address_entry:
address_entry = AddressBook()
log.debug('creating new AddressBook entry')
else:
address_entry = address_entry
log.debug('Saving Addressbook info for %s', address_entry)
for field in data.keys():
# Getting around the issue where we normally want this auto created on the front end
if field != 'addressee_name':
setattr(address_entry, field, data[field])
address_entry.addressee = data['addressee_name']
address_entry.contact = contact
address_entry.save()
YES_NO_CHOICES = (('Yes', _('Yes')),
('No', _('No')))
class YesNoForm(forms.Form):
delete_entry = forms.MultipleChoiceField(label=_('Delete entry?'), required=True, widget=forms.widgets.RadioSelect, choices=YES_NO_CHOICES, initial="No")
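# A minimal sketch of how a view might drive ``AddressBookForm.save`` above.
# The view shape and the way the contact is obtained are assumptions for
# illustration; only the form's own API (``is_valid`` and
# ``save(contact, address_entry=None)``) comes from this module.
def _example_address_book_view(request, contact, address_entry=None):
    if request.method == 'POST':
        form = AddressBookForm(request.POST)
        if form.is_valid():
            # Creates a new AddressBook entry, or updates ``address_entry``
            # in place when one is passed.
            form.save(contact, address_entry=address_entry)
    else:
        form = AddressBookForm()
    return form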
|
|
"""Terminal management for exposing terminals to a web interface using Tornado.
"""
# Copyright (c) Jupyter Development Team
# Copyright (c) 2014, Ramalingam Saravanan <[email protected]>
# Distributed under the terms of the Simplified BSD License.
from __future__ import absolute_import, print_function
import sys
if sys.version_info[0] < 3:
byte_code = ord
else:
byte_code = lambda x: x
unicode = str
from collections import deque
import itertools
import logging
import os
import signal
try:
from ptyprocess import PtyProcessUnicode
except ImportError:
from winpty import PtyProcess as PtyProcessUnicode
from tornado import gen
from tornado.ioloop import IOLoop
ENV_PREFIX = "PYXTERM_" # Environment variable prefix
DEFAULT_TERM_TYPE = "xterm"
class PtyWithClients(object):
def __init__(self, ptyproc):
self.ptyproc = ptyproc
self.clients = []
# Store the last few things read, so when a new client connects,
# it can show e.g. the most recent prompt, rather than absolutely
# nothing.
self.read_buffer = deque([], maxlen=10)
def resize_to_smallest(self):
"""Set the terminal size to that of the smallest client dimensions.
A terminal not using the full space available is much nicer than a
terminal trying to use more than the available space, so we keep it
sized to the smallest client.
"""
minrows = mincols = 10001
for client in self.clients:
rows, cols = client.size
if rows is not None and rows < minrows:
minrows = rows
if cols is not None and cols < mincols:
mincols = cols
if minrows == 10001 or mincols == 10001:
return
rows, cols = self.ptyproc.getwinsize()
if (rows, cols) != (minrows, mincols):
self.ptyproc.setwinsize(minrows, mincols)
def kill(self, sig=signal.SIGTERM):
"""Send a signal to the process in the pty"""
self.ptyproc.kill(sig)
def killpg(self, sig=signal.SIGTERM):
"""Send a signal to the process group of the process in the pty"""
if os.name == 'nt':
return self.ptyproc.kill(sig)
pgid = os.getpgid(self.ptyproc.pid)
os.killpg(pgid, sig)
@gen.coroutine
def terminate(self, force=False):
'''This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. '''
if os.name == 'nt':
signals = [signal.SIGINT, signal.SIGTERM]
else:
signals = [signal.SIGHUP, signal.SIGCONT, signal.SIGINT,
signal.SIGTERM]
loop = IOLoop.current()
sleep = lambda : gen.sleep(self.ptyproc.delayafterterminate)
if not self.ptyproc.isalive():
raise gen.Return(True)
try:
for sig in signals:
self.kill(sig)
yield sleep()
if not self.ptyproc.isalive():
raise gen.Return(True)
if force:
self.kill(signal.SIGKILL)
yield sleep()
if not self.ptyproc.isalive():
raise gen.Return(True)
else:
raise gen.Return(False)
raise gen.Return(False)
except OSError:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
yield sleep()
if not self.ptyproc.isalive():
raise gen.Return(True)
else:
raise gen.Return(False)
def _update_removing(target, changes):
"""Like dict.update(), but remove keys where the value is None.
"""
for k, v in changes.items():
if v is None:
target.pop(k, None)
else:
target[k] = v
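# For example, given ``env = {'PATH': '/usr/bin', 'PAGER': 'less'}``, calling
# ``_update_removing(env, {'PAGER': None, 'TERM': 'xterm'})`` leaves
# ``env == {'PATH': '/usr/bin', 'TERM': 'xterm'}``; an ``extra_env`` entry set
# to None therefore *unsets* an inherited variable in make_term_env() below.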
class TermManagerBase(object):
"""Base class for a terminal manager."""
def __init__(self, shell_command, server_url="", term_settings={},
extra_env=None, ioloop=None):
self.shell_command = shell_command
self.server_url = server_url
self.term_settings = term_settings
self.extra_env = extra_env
self.log = logging.getLogger(__name__)
self.ptys_by_fd = {}
if ioloop is not None:
self.ioloop = ioloop
else:
import tornado.ioloop
self.ioloop = tornado.ioloop.IOLoop.instance()
def make_term_env(self, height=25, width=80, winheight=0, winwidth=0, **kwargs):
"""Build the environment variables for the process in the terminal."""
env = os.environ.copy()
env["TERM"] = self.term_settings.get("type",DEFAULT_TERM_TYPE)
dimensions = "%dx%d" % (width, height)
if winwidth and winheight:
dimensions += ";%dx%d" % (winwidth, winheight)
env[ENV_PREFIX+"DIMENSIONS"] = dimensions
env["COLUMNS"] = str(width)
env["LINES"] = str(height)
if self.server_url:
env[ENV_PREFIX+"URL"] = self.server_url
if self.extra_env:
_update_removing(env, self.extra_env)
return env
def new_terminal(self, **kwargs):
"""Make a new terminal, return a :class:`PtyWithClients` instance."""
options = self.term_settings.copy()
options['shell_command'] = self.shell_command
options.update(kwargs)
argv = options['shell_command']
env = self.make_term_env(**options)
pty = PtyProcessUnicode.spawn(argv, env=env, cwd=options.get('cwd', None))
return PtyWithClients(pty)
def start_reading(self, ptywclients):
"""Connect a terminal to the tornado event loop to read data from it."""
fd = ptywclients.ptyproc.fd
self.ptys_by_fd[fd] = ptywclients
self.ioloop.add_handler(fd, self.pty_read, self.ioloop.READ)
def on_eof(self, ptywclients):
"""Called when the pty has closed.
"""
# Stop trying to read from that terminal
fd = ptywclients.ptyproc.fd
self.log.info("EOF on FD %d; stopping reading", fd)
del self.ptys_by_fd[fd]
self.ioloop.remove_handler(fd)
# This closes the fd, and should result in the process being reaped.
ptywclients.ptyproc.close()
def pty_read(self, fd, events=None):
"""Called by the event loop when there is pty data ready to read."""
ptywclients = self.ptys_by_fd[fd]
try:
s = ptywclients.ptyproc.read(65536)
ptywclients.read_buffer.append(s)
for client in ptywclients.clients:
client.on_pty_read(s)
except EOFError:
self.on_eof(ptywclients)
for client in ptywclients.clients:
client.on_pty_died()
def get_terminal(self, url_component=None):
"""Override in a subclass to give a terminal to a new websocket connection
The :class:`TermSocket` handler works with zero or one URL components
(capturing groups in the URL spec regex). If it receives one, it is
passed as the ``url_component`` parameter; otherwise, this is None.
"""
raise NotImplementedError
def client_disconnected(self, websocket):
"""Override this to e.g. kill terminals on client disconnection.
"""
pass
@gen.coroutine
def shutdown(self):
yield self.kill_all()
@gen.coroutine
def kill_all(self):
futures = []
for term in self.ptys_by_fd.values():
futures.append(term.terminate(force=True))
# wait for futures to finish
for f in futures:
yield f
class SingleTermManager(TermManagerBase):
"""All connections to the websocket share a common terminal."""
def __init__(self, **kwargs):
super(SingleTermManager, self).__init__(**kwargs)
self.terminal = None
def get_terminal(self, url_component=None):
if self.terminal is None:
self.terminal = self.new_terminal()
self.start_reading(self.terminal)
return self.terminal
@gen.coroutine
def kill_all(self):
yield super(SingleTermManager, self).kill_all()
self.terminal = None
class MaxTerminalsReached(Exception):
def __init__(self, max_terminals):
self.max_terminals = max_terminals
def __str__(self):
return "Cannot create more than %d terminals" % self.max_terminals
class UniqueTermManager(TermManagerBase):
"""Give each websocket a unique terminal to use."""
def __init__(self, max_terminals=None, **kwargs):
super(UniqueTermManager, self).__init__(**kwargs)
self.max_terminals = max_terminals
def get_terminal(self, url_component=None):
if self.max_terminals and len(self.ptys_by_fd) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
term = self.new_terminal()
self.start_reading(term)
return term
def client_disconnected(self, websocket):
"""Send terminal SIGHUP when client disconnects."""
self.log.info("Websocket closed, sending SIGHUP to terminal.")
if websocket.terminal:
if os.name == 'nt':
websocket.terminal.kill()
# Immediately call the pty reader to process
# the eof and free up space
self.pty_read(websocket.terminal.ptyproc.fd)
return
websocket.terminal.killpg(signal.SIGHUP)
class NamedTermManager(TermManagerBase):
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(NamedTermManager, self).__init__(**kwargs)
self.max_terminals = max_terminals
self.terminals = {}
def get_terminal(self, term_name):
assert term_name is not None
if term_name in self.terminals:
return self.terminals[term_name]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Create new terminal
self.log.info("New terminal with specified name: %s", term_name)
term = self.new_terminal()
term.term_name = term_name
self.terminals[term_name] = term
self.start_reading(term)
return term
name_template = "%d"
def _next_available_name(self):
for n in itertools.count(start=1):
name = self.name_template % n
if name not in self.terminals:
return name
def new_named_terminal(self, **kwargs):
name = self._next_available_name()
term = self.new_terminal(**kwargs)
self.log.info("New terminal with automatic name: %s", name)
term.term_name = name
self.terminals[name] = term
self.start_reading(term)
return name, term
def kill(self, name, sig=signal.SIGTERM):
term = self.terminals[name]
term.kill(sig) # This should lead to an EOF
@gen.coroutine
def terminate(self, name, force=False):
term = self.terminals[name]
yield term.terminate(force=force)
def on_eof(self, ptywclients):
super(NamedTermManager, self).on_eof(ptywclients)
name = ptywclients.term_name
self.log.info("Terminal %s closed", name)
self.terminals.pop(name, None)
@gen.coroutine
def kill_all(self):
yield super(NamedTermManager, self).kill_all()
self.terminals = {}
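# A hedged sketch of typical use of the managers above from application code.
# The websocket wiring is omitted; only the manager API defined in this module
# (new_named_terminal, get_terminal, terminate) is exercised, and the 'bash'
# shell command is an assumption for illustration.
def _example_named_manager_usage():
    manager = NamedTermManager(shell_command=['bash'], max_terminals=10)
    # Create a terminal with an auto-assigned name ("1", "2", ... following
    # name_template) and start reading its output into read_buffer.
    name, term = manager.new_named_terminal()
    # Later connections asking for the same name share the same pty.
    assert manager.get_terminal(name) is term
    # terminate() is a coroutine; callers yield (or schedule) it.
    return manager.terminate(name, force=True)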
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils.identity import IdentityMixIn
LOG = logging.getLogger(__name__)
INDEX_URL = "horizon:identity:projects:index"
ADD_USER_URL = "horizon:identity:projects:create_user"
PROJECT_GROUP_ENABLED = keystone.VERSIONS.active >= 3
PROJECT_USER_MEMBER_SLUG = "update_members"
PROJECT_GROUP_MEMBER_SLUG = "update_group_members"
COMMON_HORIZONTAL_TEMPLATE = "identity/projects/_common_horizontal_form.html"
class ProjectQuotaAction(workflows.Action):
ifcb_label = _("Injected File Content (Bytes)")
metadata_items = forms.IntegerField(min_value=-1,
label=_("Metadata Items"))
cores = forms.IntegerField(min_value=-1, label=_("VCPUs"))
instances = forms.IntegerField(min_value=-1, label=_("Instances"))
injected_files = forms.IntegerField(min_value=-1,
label=_("Injected Files"))
injected_file_content_bytes = forms.IntegerField(min_value=-1,
label=ifcb_label)
volumes = forms.IntegerField(min_value=-1, label=_("Volumes"))
snapshots = forms.IntegerField(min_value=-1, label=_("Volume Snapshots"))
gigabytes = forms.IntegerField(
min_value=-1, label=_("Total Size of Volumes and Snapshots (GiB)"))
ram = forms.IntegerField(min_value=-1, label=_("RAM (MB)"))
floating_ips = forms.IntegerField(min_value=-1, label=_("Floating IPs"))
fixed_ips = forms.IntegerField(min_value=-1, label=_("Fixed IPs"))
security_groups = forms.IntegerField(min_value=-1,
label=_("Security Groups"))
security_group_rules = forms.IntegerField(min_value=-1,
label=_("Security Group Rules"))
# Neutron
security_group = forms.IntegerField(min_value=-1,
label=_("Security Groups"))
security_group_rule = forms.IntegerField(min_value=-1,
label=_("Security Group Rules"))
floatingip = forms.IntegerField(min_value=-1, label=_("Floating IPs"))
network = forms.IntegerField(min_value=-1, label=_("Networks"))
port = forms.IntegerField(min_value=-1, label=_("Ports"))
router = forms.IntegerField(min_value=-1, label=_("Routers"))
subnet = forms.IntegerField(min_value=-1, label=_("Subnets"))
def __init__(self, request, *args, **kwargs):
super(ProjectQuotaAction, self).__init__(request,
*args,
**kwargs)
disabled_quotas = quotas.get_disabled_quotas(request)
for field in disabled_quotas:
if field in self.fields:
self.fields[field].required = False
self.fields[field].widget = forms.HiddenInput()
class UpdateProjectQuotaAction(ProjectQuotaAction):
def clean(self):
cleaned_data = super(UpdateProjectQuotaAction, self).clean()
usages = quotas.tenant_quota_usages(
self.request, tenant_id=self.initial['project_id'])
# Validate the quota values before updating quotas.
bad_values = []
for key, value in cleaned_data.items():
used = usages[key].get('used', 0)
if value is not None and value >= 0 and used > value:
bad_values.append(_('%(used)s %(key)s used') %
{'used': used,
'key': quotas.QUOTA_NAMES.get(key, key)})
if bad_values:
value_str = ", ".join(bad_values)
msg = (_('Quota value(s) cannot be less than the current usage '
'value(s): %s.') %
value_str)
raise forms.ValidationError(msg)
return cleaned_data
class Meta(object):
name = _("Quota")
slug = 'update_quotas'
help_text = _("Set maximum quotas for the project.")
permissions = ('openstack.roles.admin', 'openstack.services.compute')
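# A standalone sketch of the rule enforced by UpdateProjectQuotaAction.clean()
# above: a non-negative quota value may not drop below what the project
# already uses (-1 means unlimited and is never flagged). The plain dicts here
# stand in for the form's cleaned_data and for the usage summary returned by
# quotas.tenant_quota_usages(); the real clean() turns any offending entries
# into a ValidationError.
def _example_quota_check(cleaned_data, usages):
    bad_values = []
    for key, value in cleaned_data.items():
        used = usages.get(key, {}).get('used', 0)
        if value is not None and value >= 0 and used > value:
            bad_values.append('%s %s used' % (used, key))
    return bad_values
# _example_quota_check({'instances': 5, 'cores': -1},
#                      {'instances': {'used': 8}, 'cores': {'used': 32}})
# returns ['8 instances used'].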
class CreateProjectQuotaAction(ProjectQuotaAction):
class Meta(object):
name = _("Quota")
slug = 'create_quotas'
help_text = _("Set maximum quotas for the project.")
permissions = ('openstack.roles.admin', 'openstack.services.compute')
class UpdateProjectQuota(workflows.Step):
action_class = UpdateProjectQuotaAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id",)
contributes = quotas.QUOTA_FIELDS
class CreateProjectQuota(workflows.Step):
action_class = CreateProjectQuotaAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id",)
contributes = quotas.QUOTA_FIELDS
class CreateProjectInfoAction(workflows.Action):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
name = forms.CharField(label=_("Name"),
max_length=64)
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'rows': 4}),
label=_("Description"),
required=False)
enabled = forms.BooleanField(label=_("Enabled"),
required=False,
initial=True)
def __init__(self, request, *args, **kwargs):
super(CreateProjectInfoAction, self).__init__(request,
*args,
**kwargs)
# For keystone V3, display the two fields in read-only
if keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
class Meta(object):
name = _("Project Information")
help_text = _("Create a project to organize users.")
class CreateProjectInfo(workflows.Step):
action_class = CreateProjectInfoAction
template_name = COMMON_HORIZONTAL_TEMPLATE
contributes = ("domain_id",
"domain_name",
"project_id",
"name",
"description",
"enabled")
class UpdateProjectMembersAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateProjectMembersAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve user list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = getattr(settings,
"OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
msg = (_('Could not find default role "%s" in Keystone') %
default)
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available users
all_users = []
try:
all_users = api.keystone.user_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
users_list = [(user.id, user.name) for user in all_users]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = users_list
self.fields[field_name].initial = []
# Figure out users & roles
if project_id:
try:
users_roles = api.keystone.get_project_users_roles(request,
project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for user_id in users_roles:
roles_ids = users_roles[user_id]
for role_id in roles_ids:
field_name = self.get_member_field_name(role_id)
self.fields[field_name].initial.append(user_id)
class Meta(object):
name = _("Project Members")
slug = PROJECT_USER_MEMBER_SLUG
class UpdateProjectMembers(workflows.UpdateMembersStep):
action_class = UpdateProjectMembersAction
available_list_title = _("All Users")
members_list_title = _("Project Members")
no_available_text = _("No users found.")
no_members_text = _("No users.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve user list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class UpdateProjectGroupsAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateProjectGroupsAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve group list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = api.keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = getattr(settings,
"OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
msg = (_('Could not find default role "%s" in Keystone') %
default)
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available groups
all_groups = []
try:
all_groups = api.keystone.group_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
groups_list = [(group.id, group.name) for group in all_groups]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = groups_list
self.fields[field_name].initial = []
# Figure out groups & roles
if project_id:
try:
groups_roles = api.keystone.get_project_groups_roles(
request, project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for group_id in groups_roles:
roles_ids = groups_roles[group_id]
for role_id in roles_ids:
field_name = self.get_member_field_name(role_id)
self.fields[field_name].initial.append(group_id)
class Meta(object):
name = _("Project Groups")
slug = PROJECT_GROUP_MEMBER_SLUG
class UpdateProjectGroups(workflows.UpdateMembersStep):
action_class = UpdateProjectGroupsAction
available_list_title = _("All Groups")
members_list_title = _("Project Groups")
no_available_text = _("No groups found.")
no_members_text = _("No groups.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve role list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class CommonQuotaWorkflow(workflows.Workflow):
def _update_project_quota(self, request, data, project_id):
# Update the project quota.
nova_data = dict(
[(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
nova.tenant_quota_update(request, project_id, **nova_data)
if cinder.is_volume_service_enabled(request):
cinder_data = dict([(key, data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
cinder.tenant_quota_update(request,
project_id,
**cinder_data)
if api.base.is_service_enabled(request, 'network') and \
api.neutron.is_quotas_extension_supported(request):
neutron_data = {}
disabled_quotas = quotas.get_disabled_quotas(request)
for key in quotas.NEUTRON_QUOTA_FIELDS:
if key not in disabled_quotas:
neutron_data[key] = data[key]
api.neutron.tenant_quota_update(request,
project_id,
**neutron_data)
class CreateProject(CommonQuotaWorkflow):
slug = "create_project"
name = _("Create Project")
finalize_button_name = _("Create Project")
success_message = _('Created new project "%s".')
failure_message = _('Unable to create project "%s".')
success_url = "horizon:identity:projects:index"
default_steps = (CreateProjectInfo,
UpdateProjectMembers,
CreateProjectQuota)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (CreateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups,
CreateProjectQuota)
super(CreateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
if "%s" in message:
return message % self.context.get('name', 'unknown project')
else:
return message
def _create_project(self, request, data):
# create the project
domain_id = data['domain_id']
try:
desc = data['description']
self.object = api.keystone.tenant_create(request,
name=data['name'],
description=desc,
enabled=data['enabled'],
domain=domain_id)
return self.object
except exceptions.Conflict:
msg = _('Project name "%s" is already used.') % data['name']
self.failure_message = msg
return
except Exception:
exceptions.handle(request, ignore=True)
return
def _update_project_members(self, request, data, project_id):
# update project members
users_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
# count how many users are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_to_add += len(role_list)
# add new users to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_added = 0
for user in role_list:
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user,
role=role.id)
users_added += 1
users_to_add -= users_added
except Exception:
if PROJECT_GROUP_ENABLED:
group_msg = _(", add project groups")
else:
group_msg = ""
exceptions.handle(request,
_('Failed to add %(users_to_add)s project '
'members%(group_msg)s and set project quotas.')
% {'users_to_add': users_to_add,
'group_msg': group_msg})
def _update_project_groups(self, request, data, project_id):
# update project groups
groups_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
# count how many groups are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_to_add += len(role_list)
# add new groups to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_added = 0
for group in role_list:
api.keystone.add_group_role(request,
role=role.id,
group=group,
project=project_id)
groups_added += 1
groups_to_add -= groups_added
except Exception:
exceptions.handle(request,
_('Failed to add %s project groups '
'and update project quotas.')
% groups_to_add)
def _update_project_quota(self, request, data, project_id):
try:
super(CreateProject, self)._update_project_quota(
request, data, project_id)
except Exception:
exceptions.handle(request, _('Unable to set project quotas.'))
def handle(self, request, data):
project = self._create_project(request, data)
if not project:
return False
project_id = project.id
self._update_project_members(request, data, project_id)
if PROJECT_GROUP_ENABLED:
self._update_project_groups(request, data, project_id)
if keystone.is_cloud_admin(request):
self._update_project_quota(request, data, project_id)
return True
class CreateProjectNoQuota(CreateProject):
slug = "create_project"
name = _("Create Project")
finalize_button_name = _("Create Project")
success_message = _('Created new project "%s".')
failure_message = _('Unable to create project "%s".')
success_url = "horizon:identity:projects:index"
default_steps = (CreateProjectInfo, UpdateProjectMembers)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (CreateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups,)
super(CreateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
class UpdateProjectInfoAction(CreateProjectInfoAction):
enabled = forms.BooleanField(required=False, label=_("Enabled"))
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
def __init__(self, request, initial, *args, **kwargs):
super(UpdateProjectInfoAction, self).__init__(
request, initial, *args, **kwargs)
if initial['project_id'] == request.user.project_id:
self.fields['enabled'].widget.attrs['disabled'] = True
self.fields['enabled'].help_text = _(
'You cannot disable your current project')
def clean(self):
cleaned_data = super(UpdateProjectInfoAction, self).clean()
# NOTE(tsufiev): in case the current project is being edited, its
# 'enabled' field is disabled to prevent changing the field value
# which is always `True` for the current project (because the user
# logged in it). Since Django treats disabled checkbox as providing
# `False` value even if its initial value is `True`, we need to
# restore the original `True` value of 'enabled' field here.
if self.fields['enabled'].widget.attrs.get('disabled', False):
cleaned_data['enabled'] = True
return cleaned_data
class Meta(object):
name = _("Project Information")
slug = 'update_info'
help_text = _("Edit the project details.")
class UpdateProjectInfo(workflows.Step):
action_class = UpdateProjectInfoAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id",)
contributes = ("domain_id",
"domain_name",
"name",
"description",
"enabled")
class UpdateProject(CommonQuotaWorkflow, IdentityMixIn):
slug = "update_project"
name = _("Edit Project")
finalize_button_name = _("Save")
success_message = _('Modified project "%s".')
failure_message = _('Unable to modify project "%s".')
success_url = "horizon:identity:projects:index"
default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectQuota)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups,
UpdateProjectQuota)
super(UpdateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
if "%s" in message:
return message % self.context.get('name', 'unknown project')
else:
return message
@memoized.memoized_method
def _get_available_roles(self, request):
return api.keystone.role_list(request)
def _update_project(self, request, data):
"""Update project info"""
domain_id = api.keystone.get_effective_domain_id(self.request)
try:
project_id = data['project_id']
return api.keystone.tenant_update(
request,
project_id,
name=data['name'],
description=data['description'],
enabled=data['enabled'],
domain=domain_id)
except exceptions.Conflict:
msg = _('Project name "%s" is already used.') % data['name']
self.failure_message = msg
return
except Exception as e:
LOG.debug('Project update failed: %s' % e)
exceptions.handle(request, ignore=True)
return
def _add_roles_to_users(self, request, data, project_id, user_id,
role_ids, available_roles):
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
current_role_ids = list(role_ids)
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Check if the user is in the list of users with this role.
if user_id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# user role has changed
api.keystone.add_tenant_user_role(
request,
project=project_id,
user=user_id,
role=role.id)
else:
# User role is unchanged, so remove it from the
# remaining roles list to avoid removing it later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
return current_role_ids
def _remove_roles_from_user(self, request, project_id, user_id,
current_role_ids):
for id_to_delete in current_role_ids:
api.keystone.remove_tenant_user_role(
request,
project=project_id,
user=user_id,
role=id_to_delete)
def _is_removing_self_admin_role(self, request, project_id, user_id,
available_roles, current_role_ids):
is_current_user = user_id == request.user.id
is_current_project = project_id == request.user.tenant_id
_admin_roles = self.get_admin_roles()
available_admin_role_ids = [role.id for role in available_roles
if role.name.lower() in _admin_roles]
admin_roles = [role for role in current_role_ids
if role in available_admin_role_ids]
if len(admin_roles):
removing_admin = any([role in current_role_ids
for role in admin_roles])
else:
removing_admin = False
if is_current_user and is_current_project and removing_admin:
# Cannot remove "admin" role on current(admin) project
msg = _('You cannot revoke your administrative privileges '
'from the project you are currently logged into. '
'Please switch to another project with '
'administrative privileges or remove the '
'administrative role manually via the CLI.')
messages.warning(request, msg)
return True
else:
return False
def _update_project_members(self, request, data, project_id):
# update project members
users_to_modify = 0
# Project-user member step
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
try:
# Get our role options
available_roles = self._get_available_roles(request)
# Get the users currently associated with this project so we
# can diff against it.
users_roles = api.keystone.get_project_users_roles(
request, project=project_id)
users_to_modify = len(users_roles)
# TODO(bpokorny): The following lines are needed to make sure we
# only modify roles for users who are in the current domain.
# Otherwise, we'll end up removing roles for users who have roles
# on the project but aren't in the domain. For now, Horizon won't
# support managing roles across domains. The Keystone CLI
# supports it, so we may want to add that in the future.
all_users = api.keystone.user_list(request,
domain=data['domain_id'])
users_dict = {user.id: user.name for user in all_users}
for user_id in users_roles.keys():
# Don't remove roles if the user isn't in the domain
if user_id not in users_dict:
users_to_modify -= 1
continue
# Check if there have been any changes in the roles of
                # existing project members.
current_role_ids = list(users_roles[user_id])
modified_role_ids = self._add_roles_to_users(
request, data, project_id, user_id,
current_role_ids, available_roles)
# Prevent admins from doing stupid things to themselves.
removing_admin = self._is_removing_self_admin_role(
request, project_id, user_id, available_roles,
modified_role_ids)
# Otherwise go through and revoke any removed roles.
if not removing_admin:
self._remove_roles_from_user(request, project_id, user_id,
modified_role_ids)
users_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many users may be added for exception handling.
users_to_modify += len(data[field_name])
for role in available_roles:
users_added = 0
field_name = member_step.get_member_field_name(role.id)
for user_id in data[field_name]:
if user_id not in users_roles:
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user_id,
role=role.id)
users_added += 1
users_to_modify -= users_added
return True
except Exception:
if PROJECT_GROUP_ENABLED:
group_msg = _(", update project groups")
else:
group_msg = ""
exceptions.handle(request,
_('Failed to modify %(users_to_modify)s'
' project members%(group_msg)s and '
'update project quotas.')
% {'users_to_modify': users_to_modify,
'group_msg': group_msg})
return False
def _update_project_groups(self, request, data, project_id, domain_id):
# update project groups
groups_to_modify = 0
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
try:
available_roles = self._get_available_roles(request)
# Get the groups currently associated with this project so we
# can diff against it.
project_groups = api.keystone.group_list(request,
domain=domain_id,
project=project_id)
groups_to_modify = len(project_groups)
for group in project_groups:
# Check if there have been any changes in the roles of
                # existing project members.
current_roles = api.keystone.roles_for_group(
self.request,
group=group.id,
project=project_id)
current_role_ids = [role.id for role in current_roles]
for role in available_roles:
# Check if the group is in the list of groups with
# this role.
field_name = member_step.get_member_field_name(role.id)
if group.id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# group role has changed
api.keystone.add_group_role(
request,
role=role.id,
group=group.id,
project=project_id)
else:
# Group role is unchanged, so remove it from
# the remaining roles list to avoid removing it
# later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
# Revoke any removed roles.
for id_to_delete in current_role_ids:
api.keystone.remove_group_role(request,
role=id_to_delete,
group=group.id,
project=project_id)
groups_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many groups may be added for error handling.
groups_to_modify += len(data[field_name])
for role in available_roles:
groups_added = 0
field_name = member_step.get_member_field_name(role.id)
for group_id in data[field_name]:
if not filter(lambda x: group_id == x.id,
project_groups):
api.keystone.add_group_role(request,
role=role.id,
group=group_id,
project=project_id)
groups_added += 1
groups_to_modify -= groups_added
return True
except Exception:
exceptions.handle(request,
_('Failed to modify %s project '
'members, update project groups '
'and update project quotas.')
% groups_to_modify)
return False
def _update_project_quota(self, request, data, project_id):
try:
super(UpdateProject, self)._update_project_quota(
request, data, project_id)
return True
except Exception:
exceptions.handle(request, _('Modified project information and '
'members, but unable to modify '
'project quotas.'))
return False
def handle(self, request, data):
# FIXME(gabriel): This should be refactored to use Python's built-in
# sets and do this all in a single "roles to add" and "roles to remove"
# pass instead of the multi-pass thing happening now.
project = self._update_project(request, data)
if not project:
return False
project_id = data['project_id']
# Use the domain_id from the project if available
domain_id = getattr(project, "domain_id", '')
ret = self._update_project_members(request, data, project_id)
if not ret:
return False
if PROJECT_GROUP_ENABLED:
ret = self._update_project_groups(request, data,
project_id, domain_id)
if not ret:
return False
if api.keystone.is_cloud_admin(request):
ret = self._update_project_quota(request, data, project_id)
if not ret:
return False
return True
class UpdateProjectNoQuota(UpdateProject):
slug = "update_project"
name = _("Edit Project")
finalize_button_name = _("Save")
success_message = _('Modified project "%s".')
failure_message = _('Unable to modify project "%s".')
success_url = "horizon:identity:projects:index"
default_steps = (UpdateProjectInfo, UpdateProjectMembers)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups)
super(UpdateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
|
|
#!/usr/bin/env python
import unittest
import random
import time
import pickle
import warnings
from math import log, exp, pi, fsum, sin
from functools import reduce
from test import test_support
class TestBasicOps(unittest.TestCase):
# Superclass with tests common to all generators.
# Subclasses must arrange for self.gen to retrieve the Random instance
# to be tested.
def randomlist(self, n):
"""Helper function to make a list of random numbers"""
return [self.gen.random() for i in xrange(n)]
def test_autoseed(self):
self.gen.seed()
state1 = self.gen.getstate()
time.sleep(0.1)
        self.gen.seed()   # different seeds at different times
state2 = self.gen.getstate()
self.assertNotEqual(state1, state2)
def test_saverestore(self):
N = 1000
self.gen.seed()
state = self.gen.getstate()
randseq = self.randomlist(N)
self.gen.setstate(state) # should regenerate the same sequence
self.assertEqual(randseq, self.randomlist(N))
def test_seedargs(self):
for arg in [None, 0, 0L, 1, 1L, -1, -1L, 10**20, -(10**20),
3.14, 1+2j, 'a', tuple('abc')]:
self.gen.seed(arg)
for arg in [range(3), dict(one=1)]:
self.assertRaises(TypeError, self.gen.seed, arg)
self.assertRaises(TypeError, self.gen.seed, 1, 2)
self.assertRaises(TypeError, type(self.gen), [])
def test_jumpahead(self):
self.gen.seed()
state1 = self.gen.getstate()
self.gen.jumpahead(100)
state2 = self.gen.getstate() # s/b distinct from state1
self.assertNotEqual(state1, state2)
self.gen.jumpahead(100)
state3 = self.gen.getstate() # s/b distinct from state2
self.assertNotEqual(state2, state3)
with test_support.check_py3k_warnings(quiet=True):
self.assertRaises(TypeError, self.gen.jumpahead) # needs an arg
self.assertRaises(TypeError, self.gen.jumpahead, 2, 3) # too many
def test_sample(self):
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
N = 100
population = xrange(N)
for k in xrange(N+1):
s = self.gen.sample(population, k)
self.assertEqual(len(s), k)
uniq = set(s)
self.assertEqual(len(uniq), k)
self.assertTrue(uniq <= set(population))
self.assertEqual(self.gen.sample([], 0), []) # test edge case N==k==0
def test_sample_distribution(self):
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n = 5
pop = range(n)
trials = 10000 # large num prevents false negatives without slowing normal case
def factorial(n):
return reduce(int.__mul__, xrange(1, n), 1)
for k in xrange(n):
expected = factorial(n) // factorial(n-k)
perms = {}
for i in xrange(trials):
perms[tuple(self.gen.sample(pop, k))] = None
if len(perms) == expected:
break
else:
self.fail()
def test_sample_inputs(self):
# SF bug #801342 -- population can be any iterable defining __len__()
self.gen.sample(set(range(20)), 2)
self.gen.sample(range(20), 2)
self.gen.sample(xrange(20), 2)
self.gen.sample(str('abcdefghijklmnopqrst'), 2)
self.gen.sample(tuple('abcdefghijklmnopqrst'), 2)
def test_sample_on_dicts(self):
self.gen.sample(dict.fromkeys('abcdefghijklmnopqrst'), 2)
# SF bug #1460340 -- random.sample can raise KeyError
a = dict.fromkeys(range(10)+range(10,100,2)+range(100,110))
self.gen.sample(a, 3)
# A followup to bug #1460340: sampling from a dict could return
# a subset of its keys or of its values, depending on the size of
# the subset requested.
N = 30
d = dict((i, complex(i, i)) for i in xrange(N))
for k in xrange(N+1):
samp = self.gen.sample(d, k)
# Verify that we got ints back (keys); the values are complex.
for x in samp:
self.assertTrue(type(x) is int)
samp.sort()
self.assertEqual(samp, range(N))
def test_gauss(self):
# Ensure that the seed() method initializes all the hidden state. In
# particular, through 2.2.1 it failed to reset a piece of state used
# by (and only by) the .gauss() method.
for seed in 1, 12, 123, 1234, 12345, 123456, 654321:
self.gen.seed(seed)
x1 = self.gen.random()
y1 = self.gen.gauss(0, 1)
self.gen.seed(seed)
x2 = self.gen.random()
y2 = self.gen.gauss(0, 1)
self.assertEqual(x1, x2)
self.assertEqual(y1, y2)
def test_pickling(self):
state = pickle.dumps(self.gen)
origseq = [self.gen.random() for i in xrange(10)]
newgen = pickle.loads(state)
restoredseq = [newgen.random() for i in xrange(10)]
self.assertEqual(origseq, restoredseq)
def test_bug_1727780(self):
# verify that version-2-pickles can be loaded
# fine, whether they are created on 32-bit or 64-bit
# platforms, and that version-3-pickles load fine.
files = [("randv2_32.pck", 780),
("randv2_64.pck", 866),
("randv3.pck", 343)]
for file, value in files:
f = open(test_support.findfile(file),"rb")
r = pickle.load(f)
f.close()
self.assertEqual(r.randrange(1000), value)
class WichmannHill_TestBasicOps(TestBasicOps):
gen = random.WichmannHill()
def test_setstate_first_arg(self):
self.assertRaises(ValueError, self.gen.setstate, (2, None, None))
def test_strong_jumpahead(self):
# tests that jumpahead(n) semantics correspond to n calls to random()
N = 1000
s = self.gen.getstate()
self.gen.jumpahead(N)
r1 = self.gen.random()
# now do it the slow way
self.gen.setstate(s)
for i in xrange(N):
self.gen.random()
r2 = self.gen.random()
self.assertEqual(r1, r2)
def test_gauss_with_whseed(self):
# Ensure that the seed() method initializes all the hidden state. In
# particular, through 2.2.1 it failed to reset a piece of state used
# by (and only by) the .gauss() method.
for seed in 1, 12, 123, 1234, 12345, 123456, 654321:
self.gen.whseed(seed)
x1 = self.gen.random()
y1 = self.gen.gauss(0, 1)
self.gen.whseed(seed)
x2 = self.gen.random()
y2 = self.gen.gauss(0, 1)
self.assertEqual(x1, x2)
self.assertEqual(y1, y2)
def test_bigrand(self):
# Verify warnings are raised when randrange is too large for random()
with warnings.catch_warnings():
warnings.filterwarnings("error", "Underlying random")
self.assertRaises(UserWarning, self.gen.randrange, 2**60)
class SystemRandom_TestBasicOps(TestBasicOps):
gen = random.SystemRandom()
def test_autoseed(self):
# Doesn't need to do anything except not fail
self.gen.seed()
def test_saverestore(self):
self.assertRaises(NotImplementedError, self.gen.getstate)
self.assertRaises(NotImplementedError, self.gen.setstate, None)
def test_seedargs(self):
# Doesn't need to do anything except not fail
self.gen.seed(100)
def test_jumpahead(self):
# Doesn't need to do anything except not fail
self.gen.jumpahead(100)
def test_gauss(self):
self.gen.gauss_next = None
self.gen.seed(100)
self.assertEqual(self.gen.gauss_next, None)
def test_pickling(self):
self.assertRaises(NotImplementedError, pickle.dumps, self.gen)
def test_53_bits_per_float(self):
# This should pass whenever a C double has 53 bit precision.
span = 2 ** 53
cum = 0
for i in xrange(100):
cum |= int(self.gen.random() * span)
self.assertEqual(cum, span-1)
def test_bigrand(self):
# The randrange routine should build-up the required number of bits
# in stages so that all bit positions are active.
span = 2 ** 500
cum = 0
for i in xrange(100):
r = self.gen.randrange(span)
self.assertTrue(0 <= r < span)
cum |= r
self.assertEqual(cum, span-1)
def test_bigrand_ranges(self):
for i in [40,80, 160, 200, 211, 250, 375, 512, 550]:
start = self.gen.randrange(2 ** i)
stop = self.gen.randrange(2 ** (i-2))
if stop <= start:
return
self.assertTrue(start <= self.gen.randrange(start, stop) < stop)
def test_rangelimits(self):
for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]:
self.assertEqual(set(range(start,stop)),
set([self.gen.randrange(start,stop) for i in xrange(100)]))
def test_genrandbits(self):
# Verify ranges
for k in xrange(1, 1000):
self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k)
# Verify all bits active
getbits = self.gen.getrandbits
for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]:
cum = 0
for i in xrange(100):
cum |= getbits(span)
self.assertEqual(cum, 2**span-1)
# Verify argument checking
self.assertRaises(TypeError, self.gen.getrandbits)
self.assertRaises(TypeError, self.gen.getrandbits, 1, 2)
self.assertRaises(ValueError, self.gen.getrandbits, 0)
self.assertRaises(ValueError, self.gen.getrandbits, -1)
self.assertRaises(TypeError, self.gen.getrandbits, 10.1)
def test_randbelow_logic(self, _log=log, int=int):
# check bitcount transition points: 2**i and 2**(i+1)-1
# show that: k = int(1.001 + _log(n, 2))
# is equal to or one greater than the number of bits in n
for i in xrange(1, 1000):
n = 1L << i # check an exact power of two
numbits = i+1
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits)
self.assertTrue(n == 2**(k-1))
n += n - 1 # check 1 below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertIn(k, [numbits, numbits+1])
self.assertTrue(2**k > n > 2**(k-2))
n -= n >> 15 # check a little farther below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits) # note the stronger assertion
self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion
class MersenneTwister_TestBasicOps(TestBasicOps):
gen = random.Random()
def test_setstate_first_arg(self):
self.assertRaises(ValueError, self.gen.setstate, (1, None, None))
def test_setstate_middle_arg(self):
# Wrong type, s/b tuple
self.assertRaises(TypeError, self.gen.setstate, (2, None, None))
# Wrong length, s/b 625
self.assertRaises(ValueError, self.gen.setstate, (2, (1,2,3), None))
# Wrong type, s/b tuple of 625 ints
self.assertRaises(TypeError, self.gen.setstate, (2, ('a',)*625, None))
# Last element s/b an int also
self.assertRaises(TypeError, self.gen.setstate, (2, (0,)*624+('a',), None))
def test_referenceImplementation(self):
# Compare the python implementation with results from the original
# code. Create 2000 53-bit precision random floats. Compare only
# the last ten entries to show that the independent implementations
# are tracking. Here is the main() function needed to create the
# list of expected random numbers:
# void main(void){
# int i;
# unsigned long init[4]={61731, 24903, 614, 42143}, length=4;
# init_by_array(init, length);
# for (i=0; i<2000; i++) {
# printf("%.15f ", genrand_res53());
# if (i%5==4) printf("\n");
# }
# }
expected = [0.45839803073713259,
0.86057815201978782,
0.92848331726782152,
0.35932681119782461,
0.081823493762449573,
0.14332226470169329,
0.084297823823520024,
0.53814864671831453,
0.089215024911993401,
0.78486196105372907]
self.gen.seed(61731L + (24903L<<32) + (614L<<64) + (42143L<<96))
actual = self.randomlist(2000)[-10:]
for a, e in zip(actual, expected):
self.assertAlmostEqual(a,e,places=14)
def test_strong_reference_implementation(self):
# Like test_referenceImplementation, but checks for exact bit-level
# equality. This should pass on any box where C double contains
# at least 53 bits of precision (the underlying algorithm suffers
# no rounding errors -- all results are exact).
from math import ldexp
expected = [0x0eab3258d2231fL,
0x1b89db315277a5L,
0x1db622a5518016L,
0x0b7f9af0d575bfL,
0x029e4c4db82240L,
0x04961892f5d673L,
0x02b291598e4589L,
0x11388382c15694L,
0x02dad977c9e1feL,
0x191d96d4d334c6L]
self.gen.seed(61731L + (24903L<<32) + (614L<<64) + (42143L<<96))
actual = self.randomlist(2000)[-10:]
for a, e in zip(actual, expected):
self.assertEqual(long(ldexp(a, 53)), e)
def test_long_seed(self):
# This is most interesting to run in debug mode, just to make sure
# nothing blows up. Under the covers, a dynamically resized array
# is allocated, consuming space proportional to the number of bits
# in the seed. Unfortunately, that's a quadratic-time algorithm,
# so don't make this horribly big.
seed = (1L << (10000 * 8)) - 1 # about 10K bytes
self.gen.seed(seed)
def test_53_bits_per_float(self):
# This should pass whenever a C double has 53 bit precision.
span = 2 ** 53
cum = 0
for i in xrange(100):
cum |= int(self.gen.random() * span)
self.assertEqual(cum, span-1)
def test_bigrand(self):
# The randrange routine should build-up the required number of bits
# in stages so that all bit positions are active.
span = 2 ** 500
cum = 0
for i in xrange(100):
r = self.gen.randrange(span)
self.assertTrue(0 <= r < span)
cum |= r
self.assertEqual(cum, span-1)
def test_bigrand_ranges(self):
for i in [40,80, 160, 200, 211, 250, 375, 512, 550]:
start = self.gen.randrange(2 ** i)
stop = self.gen.randrange(2 ** (i-2))
if stop <= start:
return
self.assertTrue(start <= self.gen.randrange(start, stop) < stop)
def test_rangelimits(self):
for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]:
self.assertEqual(set(range(start,stop)),
set([self.gen.randrange(start,stop) for i in xrange(100)]))
def test_genrandbits(self):
# Verify cross-platform repeatability
self.gen.seed(1234567)
self.assertEqual(self.gen.getrandbits(100),
97904845777343510404718956115L)
# Verify ranges
for k in xrange(1, 1000):
self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k)
# Verify all bits active
getbits = self.gen.getrandbits
for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]:
cum = 0
for i in xrange(100):
cum |= getbits(span)
self.assertEqual(cum, 2**span-1)
# Verify argument checking
self.assertRaises(TypeError, self.gen.getrandbits)
self.assertRaises(TypeError, self.gen.getrandbits, 'a')
self.assertRaises(TypeError, self.gen.getrandbits, 1, 2)
self.assertRaises(ValueError, self.gen.getrandbits, 0)
self.assertRaises(ValueError, self.gen.getrandbits, -1)
def test_randbelow_logic(self, _log=log, int=int):
# check bitcount transition points: 2**i and 2**(i+1)-1
# show that: k = int(1.001 + _log(n, 2))
# is equal to or one greater than the number of bits in n
for i in xrange(1, 1000):
n = 1L << i # check an exact power of two
numbits = i+1
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits)
self.assertTrue(n == 2**(k-1))
n += n - 1 # check 1 below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertIn(k, [numbits, numbits+1])
self.assertTrue(2**k > n > 2**(k-2))
n -= n >> 15 # check a little farther below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits) # note the stronger assertion
self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion
def test_randrange_bug_1590891(self):
start = 1000000000000
stop = -100000000000000000000
step = -200
x = self.gen.randrange(start, stop, step)
self.assertTrue(stop < x <= start)
self.assertEqual((x+stop)%step, 0)
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
# Reflection to right half of complex plane
if z < 0.5:
return pi / sin(pi*z) / gamma(1.0-z)
# Lanczos approximation with g=7
az = z + (7.0 - 0.5)
return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
0.9999999999995183,
676.5203681218835 / z,
-1259.139216722289 / (z+1.0),
771.3234287757674 / (z+2.0),
-176.6150291498386 / (z+3.0),
12.50734324009056 / (z+4.0),
-0.1385710331296526 / (z+5.0),
0.9934937113930748e-05 / (z+6.0),
0.1659470187408462e-06 / (z+7.0),
])
class TestDistributions(unittest.TestCase):
def test_zeroinputs(self):
# Verify that distributions can handle a series of zero inputs
g = random.Random()
x = [g.random() for i in xrange(50)] + [0.0]*5
g.random = x[:].pop; g.uniform(1,10)
g.random = x[:].pop; g.paretovariate(1.0)
g.random = x[:].pop; g.expovariate(1.0)
g.random = x[:].pop; g.weibullvariate(1.0, 1.0)
g.random = x[:].pop; g.normalvariate(0.0, 1.0)
g.random = x[:].pop; g.gauss(0.0, 1.0)
g.random = x[:].pop; g.lognormvariate(0.0, 1.0)
g.random = x[:].pop; g.vonmisesvariate(0.0, 1.0)
g.random = x[:].pop; g.gammavariate(0.01, 1.0)
g.random = x[:].pop; g.gammavariate(1.0, 1.0)
g.random = x[:].pop; g.gammavariate(200.0, 1.0)
g.random = x[:].pop; g.betavariate(3.0, 3.0)
g.random = x[:].pop; g.triangular(0.0, 1.0, 1.0/3.0)
def test_avg_std(self):
# Use integration to test distribution average and standard deviation.
# Only works for distributions which do not consume variates in pairs
g = random.Random()
N = 5000
x = [i/float(N) for i in xrange(1,N)]
for variate, args, mu, sigmasqrd in [
(g.uniform, (1.0,10.0), (10.0+1.0)/2, (10.0-1.0)**2/12),
(g.triangular, (0.0, 1.0, 1.0/3.0), 4.0/9.0, 7.0/9.0/18.0),
(g.expovariate, (1.5,), 1/1.5, 1/1.5**2),
(g.paretovariate, (5.0,), 5.0/(5.0-1),
5.0/((5.0-1)**2*(5.0-2))),
(g.weibullvariate, (1.0, 3.0), gamma(1+1/3.0),
gamma(1+2/3.0)-gamma(1+1/3.0)**2) ]:
g.random = x[:].pop
y = []
for i in xrange(len(x)):
try:
y.append(variate(*args))
except IndexError:
pass
s1 = s2 = 0
for e in y:
s1 += e
s2 += (e - mu) ** 2
N = len(y)
self.assertAlmostEqual(s1/N, mu, 2)
self.assertAlmostEqual(s2/(N-1), sigmasqrd, 2)
class TestModule(unittest.TestCase):
def testMagicConstants(self):
self.assertAlmostEqual(random.NV_MAGICCONST, 1.71552776992141)
self.assertAlmostEqual(random.TWOPI, 6.28318530718)
self.assertAlmostEqual(random.LOG4, 1.38629436111989)
self.assertAlmostEqual(random.SG_MAGICCONST, 2.50407739677627)
def test__all__(self):
# tests validity but not completeness of the __all__ list
self.assertTrue(set(random.__all__) <= set(dir(random)))
def test_random_subclass_with_kwargs(self):
# SF bug #1486663 -- this used to erroneously raise a TypeError
class Subclass(random.Random):
def __init__(self, newarg=None):
random.Random.__init__(self)
Subclass(newarg=1)
def test_main(verbose=None):
testclasses = [WichmannHill_TestBasicOps,
MersenneTwister_TestBasicOps,
TestDistributions,
TestModule]
try:
random.SystemRandom().random()
except NotImplementedError:
pass
else:
testclasses.append(SystemRandom_TestBasicOps)
test_support.run_unittest(*testclasses)
# verify reference counting
import sys
if verbose and hasattr(sys, "gettotalrefcount"):
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*testclasses)
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
|
|
"""Test the sonos config flow."""
from __future__ import annotations
from unittest.mock import MagicMock, patch
from homeassistant import config_entries, core
from homeassistant.components import ssdp, zeroconf
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.sonos.const import DATA_SONOS_DISCOVERY_MANAGER, DOMAIN
from homeassistant.const import CONF_HOSTS
from homeassistant.setup import async_setup_component
async def test_user_form(
hass: core.HomeAssistant, zeroconf_payload: zeroconf.ZeroconfServiceInfo
):
"""Test we get the user initiated form."""
# Ensure config flow will fail if no devices discovered yet
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "abort"
assert result["reason"] == "no_devices_found"
# Initiate a discovery to allow config entry creation
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf_payload,
)
# Ensure config flow succeeds after discovery
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.sonos.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.sonos.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Sonos"
assert result2["data"] == {}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_user_form_already_created(hass: core.HomeAssistant):
"""Ensure we abort a flow if the entry is already created from config."""
config = {DOMAIN: {MP_DOMAIN: {CONF_HOSTS: "192.168.4.2"}}}
await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
async def test_zeroconf_form(
hass: core.HomeAssistant, zeroconf_payload: zeroconf.ZeroconfServiceInfo
):
"""Test we pass Zeroconf discoveries to the manager."""
mock_manager = hass.data[DATA_SONOS_DISCOVERY_MANAGER] = MagicMock()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf_payload,
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.sonos.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.sonos.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Sonos"
assert result2["data"] == {}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_manager.mock_calls) == 2
async def test_ssdp_discovery(hass: core.HomeAssistant, soco):
"""Test that SSDP discoveries create a config flow."""
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_location=f"http://{soco.ip_address}/",
ssdp_st="urn:schemas-upnp-org:device:ZonePlayer:1",
ssdp_usn=f"uuid:{soco.uid}_MR::urn:schemas-upnp-org:service:GroupRenderingControl:1",
upnp={
ssdp.ATTR_UPNP_UDN: f"uuid:{soco.uid}",
},
),
)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
flow = flows[0]
with patch(
"homeassistant.components.sonos.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.sonos.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
flow["flow_id"],
{},
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "Sonos"
assert result["data"] == {}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_sonos_v1(hass: core.HomeAssistant):
"""Test we pass sonos devices to the discovery manager with v1 firmware devices."""
mock_manager = hass.data[DATA_SONOS_DISCOVERY_MANAGER] = MagicMock()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.107",
port=1443,
hostname="sonos5CAAFDE47AC8.local.",
type="_sonos._tcp.local.",
name="Sonos-5CAAFDE47AC8._sonos._tcp.local.",
properties={
"_raw": {
"info": b"/api/v1/players/RINCON_5CAAFDE47AC801400/info",
"vers": b"1",
"protovers": b"1.18.9",
},
"info": "/api/v1/players/RINCON_5CAAFDE47AC801400/info",
"vers": "1",
"protovers": "1.18.9",
},
),
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.sonos.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.sonos.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Sonos"
assert result2["data"] == {}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_manager.mock_calls) == 2
async def test_zeroconf_form_not_sonos(
hass: core.HomeAssistant, zeroconf_payload: zeroconf.ZeroconfServiceInfo
):
"""Test we abort on non-sonos devices."""
mock_manager = hass.data[DATA_SONOS_DISCOVERY_MANAGER] = MagicMock()
zeroconf_payload.hostname = "not-aaa"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf_payload,
)
assert result["type"] == "abort"
assert result["reason"] == "not_sonos_device"
assert len(mock_manager.mock_calls) == 0
|
|
import re
class BooleanExpression:
# A simple evaluator of boolean expressions.
#
# Grammar:
# expr :: or_expr
# or_expr :: and_expr ('||' and_expr)*
# and_expr :: not_expr ('&&' not_expr)*
# not_expr :: '!' not_expr
# '(' or_expr ')'
# identifier
# identifier :: [-+=._a-zA-Z0-9]+
# Evaluates `string` as a boolean expression.
# Returns True or False. Throws a ValueError on syntax error.
#
# Variables in `variables` are true.
# Substrings of `triple` are true.
# 'true' is true.
# All other identifiers are false.
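# Illustrative usage (the feature names below are made up):
#   BooleanExpression.evaluate('linux && !(win32 || darwin)', {'linux'})     # True
#   BooleanExpression.evaluate('x86_64', {}, 'x86_64-unknown-linux-gnu')     # True (substring of triple)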
@staticmethod
def evaluate(string, variables, triple=""):
try:
parser = BooleanExpression(string, set(variables), triple)
return parser.parseAll()
except ValueError as e:
raise ValueError(str(e) + ('\nin expression: %r' % string))
#####
def __init__(self, string, variables, triple=""):
self.tokens = BooleanExpression.tokenize(string)
self.variables = variables
self.variables.add('true')
self.triple = triple
self.value = None
self.token = None
# Singleton end-of-expression marker.
END = object()
# Tokenization pattern.
Pattern = re.compile(r'\A\s*([()]|[-+=._a-zA-Z0-9]+|&&|\|\||!)\s*(.*)\Z')
@staticmethod
def tokenize(string):
while True:
m = re.match(BooleanExpression.Pattern, string)
if m is None:
if string == "":
yield BooleanExpression.END
return
else:
raise ValueError("couldn't parse text: %r" % string)
token = m.group(1)
string = m.group(2)
yield token
def quote(self, token):
if token is BooleanExpression.END:
return '<end of expression>'
else:
return repr(token)
def accept(self, t):
if self.token == t:
self.token = next(self.tokens)
return True
else:
return False
def expect(self, t):
if self.token == t:
if self.token != BooleanExpression.END:
self.token = next(self.tokens)
else:
raise ValueError("expected: %s\nhave: %s" %
(self.quote(t), self.quote(self.token)))
def isIdentifier(self, t):
if (t is BooleanExpression.END or t == '&&' or t == '||' or
t == '!' or t == '(' or t == ')'):
return False
return True
def parseNOT(self):
if self.accept('!'):
self.parseNOT()
self.value = not self.value
elif self.accept('('):
self.parseOR()
self.expect(')')
elif not self.isIdentifier(self.token):
raise ValueError("expected: '!' or '(' or identifier\nhave: %s" %
self.quote(self.token))
else:
self.value = (self.token in self.variables or
self.token in self.triple)
self.token = next(self.tokens)
def parseAND(self):
self.parseNOT()
while self.accept('&&'):
left = self.value
self.parseNOT()
right = self.value
# this is technically the wrong associativity, but it
# doesn't matter for this limited expression grammar
self.value = left and right
def parseOR(self):
self.parseAND()
while self.accept('||'):
left = self.value
self.parseAND()
right = self.value
# this is technically the wrong associativity, but it
# doesn't matter for this limited expression grammar
self.value = left or right
def parseAll(self):
self.token = next(self.tokens)
self.parseOR()
self.expect(BooleanExpression.END)
return self.value
#######
# Tests
import unittest
class TestBooleanExpression(unittest.TestCase):
def test_variables(self):
variables = {'its-true', 'false-lol-true', 'under_score',
'e=quals', 'd1g1ts'}
self.assertTrue(BooleanExpression.evaluate('true', variables))
self.assertTrue(BooleanExpression.evaluate('its-true', variables))
self.assertTrue(BooleanExpression.evaluate('false-lol-true', variables))
self.assertTrue(BooleanExpression.evaluate('under_score', variables))
self.assertTrue(BooleanExpression.evaluate('e=quals', variables))
self.assertTrue(BooleanExpression.evaluate('d1g1ts', variables))
self.assertFalse(BooleanExpression.evaluate('false', variables))
self.assertFalse(BooleanExpression.evaluate('True', variables))
self.assertFalse(BooleanExpression.evaluate('true-ish', variables))
self.assertFalse(BooleanExpression.evaluate('not_true', variables))
self.assertFalse(BooleanExpression.evaluate('tru', variables))
def test_triple(self):
triple = 'arch-vendor-os'
self.assertTrue(BooleanExpression.evaluate('arch-', {}, triple))
self.assertTrue(BooleanExpression.evaluate('ar', {}, triple))
self.assertTrue(BooleanExpression.evaluate('ch-vend', {}, triple))
self.assertTrue(BooleanExpression.evaluate('-vendor-', {}, triple))
self.assertTrue(BooleanExpression.evaluate('-os', {}, triple))
self.assertFalse(BooleanExpression.evaluate('arch-os', {}, triple))
def test_operators(self):
self.assertTrue(BooleanExpression.evaluate('true || true', {}))
self.assertTrue(BooleanExpression.evaluate('true || false', {}))
self.assertTrue(BooleanExpression.evaluate('false || true', {}))
self.assertFalse(BooleanExpression.evaluate('false || false', {}))
self.assertTrue(BooleanExpression.evaluate('true && true', {}))
self.assertFalse(BooleanExpression.evaluate('true && false', {}))
self.assertFalse(BooleanExpression.evaluate('false && true', {}))
self.assertFalse(BooleanExpression.evaluate('false && false', {}))
self.assertFalse(BooleanExpression.evaluate('!true', {}))
self.assertTrue(BooleanExpression.evaluate('!false', {}))
self.assertTrue(BooleanExpression.evaluate(' ((!((false) )) ) ', {}))
self.assertTrue(BooleanExpression.evaluate('true && (true && (true))', {}))
self.assertTrue(BooleanExpression.evaluate('!false && !false && !! !false', {}))
self.assertTrue(BooleanExpression.evaluate('false && false || true', {}))
self.assertTrue(BooleanExpression.evaluate('(false && false) || true', {}))
self.assertFalse(BooleanExpression.evaluate('false && (false || true)', {}))
# Evaluate boolean expression `expr`.
# Fail if it does not throw a ValueError containing the text `error`.
def checkException(self, expr, error):
try:
BooleanExpression.evaluate(expr, {})
self.fail("expression %r didn't cause an exception" % expr)
except ValueError as e:
if -1 == str(e).find(error):
self.fail(("expression %r caused the wrong ValueError\n" +
"actual error was:\n%s\n" +
"expected error was:\n%s\n") % (expr, e, error))
except BaseException as e:
self.fail(("expression %r caused the wrong exception; actual " +
"exception was: \n%r") % (expr, e))
def test_errors(self):
self.checkException("ba#d",
"couldn't parse text: '#d'\n" +
"in expression: 'ba#d'")
self.checkException("true and true",
"expected: <end of expression>\n" +
"have: 'and'\n" +
"in expression: 'true and true'")
self.checkException("|| true",
"expected: '!' or '(' or identifier\n" +
"have: '||'\n" +
"in expression: '|| true'")
self.checkException("true &&",
"expected: '!' or '(' or identifier\n" +
"have: <end of expression>\n" +
"in expression: 'true &&'")
self.checkException("",
"expected: '!' or '(' or identifier\n" +
"have: <end of expression>\n" +
"in expression: ''")
self.checkException("*",
"couldn't parse text: '*'\n" +
"in expression: '*'")
self.checkException("no wait stop",
"expected: <end of expression>\n" +
"have: 'wait'\n" +
"in expression: 'no wait stop'")
self.checkException("no-$-please",
"couldn't parse text: '$-please'\n" +
"in expression: 'no-$-please'")
self.checkException("(((true && true) || true)",
"expected: ')'\n" +
"have: <end of expression>\n" +
"in expression: '(((true && true) || true)'")
self.checkException("true (true)",
"expected: <end of expression>\n" +
"have: '('\n" +
"in expression: 'true (true)'")
self.checkException("( )",
"expected: '!' or '(' or identifier\n" +
"have: ')'\n" +
"in expression: '( )'")
if __name__ == '__main__':
unittest.main()
|
|
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import errno
import logging
import os
from ceph_deploy import conf
from ceph_deploy import exc
from ceph_deploy import hosts
from ceph_deploy.util import system
from ceph_deploy.lib import remoto
from ceph_deploy.cliutil import priority
LOG = logging.getLogger(__name__)
def get_bootstrap_mds_key(cluster):
"""
Read the bootstrap-mds key for `cluster`.
"""
path = '{cluster}.bootstrap-mds.keyring'.format(cluster=cluster)
try:
with open(path, 'rb') as f:
return f.read()
except IOError:
raise RuntimeError('bootstrap-mds keyring not found; run \'gatherkeys\'')
def create_mds(distro, name, cluster, init):
conn = distro.conn
path = '/var/lib/ceph/mds/{cluster}-{name}'.format(
cluster=cluster,
name=name
)
conn.remote_module.safe_mkdir(path)
bootstrap_keyring = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
cluster=cluster
)
keypath = os.path.join(path, 'keyring')
stdout, stderr, returncode = remoto.process.check(
conn,
[
'ceph',
'--cluster', cluster,
'--name', 'client.bootstrap-mds',
'--keyring', bootstrap_keyring,
'auth', 'get-or-create', 'mds.{name}'.format(name=name),
'osd', 'allow rwx',
'mds', 'allow',
'mon', 'allow profile mds',
'-o',
os.path.join(keypath),
]
)
if returncode > 0 and returncode != errno.EACCES:
for line in stderr:
conn.logger.error(line)
for line in stdout:
# yes stdout as err because this is an error
conn.logger.error(line)
conn.logger.error('exit code from command was: %s' % returncode)
raise RuntimeError('could not create mds')
remoto.process.check(
conn,
[
'ceph',
'--cluster', cluster,
'--name', 'client.bootstrap-mds',
'--keyring', bootstrap_keyring,
'auth', 'get-or-create', 'mds.{name}'.format(name=name),
'osd', 'allow *',
'mds', 'allow',
'mon', 'allow rwx',
'-o',
os.path.join(keypath),
]
)
conn.remote_module.touch_file(os.path.join(path, 'done'))
conn.remote_module.touch_file(os.path.join(path, init))
if init == 'upstart':
remoto.process.run(
conn,
[
'initctl',
'emit',
'ceph-mds',
'cluster={cluster}'.format(cluster=cluster),
'id={name}'.format(name=name),
],
timeout=7
)
elif init == 'sysvinit':
remoto.process.run(
conn,
[
'service',
'ceph',
'start',
'mds.{name}'.format(name=name),
],
timeout=7
)
if distro.is_el:
system.enable_service(distro.conn)
elif init == 'systemd':
remoto.process.run(
conn,
[
'systemctl',
'enable',
'ceph-mds@{name}'.format(name=name),
],
timeout=7
)
remoto.process.run(
conn,
[
'systemctl',
'start',
'ceph-mds@{name}'.format(name=name),
],
timeout=7
)
remoto.process.run(
conn,
[
'systemctl',
'enable',
'ceph.target',
],
timeout=7
)
def mds_create(args):
cfg = conf.ceph.load(args)
LOG.debug(
'Deploying mds, cluster %s hosts %s',
args.cluster,
' '.join(':'.join(x or '' for x in t) for t in args.mds),
)
key = get_bootstrap_mds_key(cluster=args.cluster)
bootstrapped = set()
errors = 0
failed_on_rhel = False
for hostname, name in args.mds:
try:
distro = hosts.get(hostname, username=args.username)
rlogger = distro.conn.logger
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
LOG.debug('remote host will use %s', distro.init)
if hostname not in bootstrapped:
bootstrapped.add(hostname)
LOG.debug('deploying mds bootstrap to %s', hostname)
conf_data = StringIO()
cfg.write(conf_data)
distro.conn.remote_module.write_conf(
args.cluster,
conf_data.getvalue(),
args.overwrite_conf,
)
path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
cluster=args.cluster,
)
if not distro.conn.remote_module.path_exists(path):
rlogger.warning('mds keyring does not exist yet, creating one')
distro.conn.remote_module.write_keyring(path, key)
create_mds(distro, name, args.cluster, distro.init)
distro.conn.exit()
except RuntimeError as e:
if distro.normalized_name == 'redhat':
LOG.error('this feature may not yet be available for %s %s' % (distro.name, distro.release))
failed_on_rhel = True
LOG.error(e)
errors += 1
if errors:
if failed_on_rhel:
# because users only read the last few lines :(
LOG.error(
'RHEL RHCS systems do not have the ability to deploy MDS yet'
)
raise exc.GenericError('Failed to create %d MDSs' % errors)
def mds(args):
if args.subcommand == 'create':
mds_create(args)
else:
LOG.error('subcommand %s not implemented', args.subcommand)
def colon_separated(s):
host = s
name = s
if s.count(':') == 1:
(host, name) = s.split(':')
return (host, name)
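# For example, colon_separated('node1:mds-a') returns ('node1', 'mds-a'), and a
# bare 'node1' returns ('node1', 'node1').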
@priority(30)
def make(parser):
"""
Ceph MDS daemon management
"""
mds_parser = parser.add_subparsers(dest='subcommand')
mds_parser.required = True
mds_create = mds_parser.add_parser(
'create',
help='Deploy Ceph MDS on remote host(s)'
)
mds_create.add_argument(
'mds',
metavar='HOST[:NAME]',
nargs='+',
type=colon_separated,
help='host (and optionally the daemon name) to deploy on',
)
parser.set_defaults(
func=mds,
)
|
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
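As a concrete illustration (using the same placeholder API key as the earlier examples), that request could be issued as
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/networks?page=2&per_page=5"`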
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size, since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
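For instance, a client could rebuild the association with a simple lookup (a
minimal sketch; `payload` stands for the parsed JSON response shown above):
```
states_by_id = {s['id']: s for s in payload['states']}
provider_states = [states_by_id[p['state_id']] for p in payload['providers']]
```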
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ZipCountiesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_zip_counties(self, zip_prefix, **kwargs):
"""
Search for Zip Counties
Our `Plan` endpoints require a zip code and a fips (county) code. This is because plan pricing requires both of these elements. Users are unlikely to know their fips code, so we provide this endpoint to look up a `ZipCounty` by zip code and return both the selected zip and fips codes.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_zip_counties(zip_prefix, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str zip_prefix: Partial five-digit Zip (required)
:return: ZipCountiesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_zip_counties_with_http_info(zip_prefix, **kwargs)
else:
(data) = self.get_zip_counties_with_http_info(zip_prefix, **kwargs)
return data
def get_zip_counties_with_http_info(self, zip_prefix, **kwargs):
"""
Search for Zip Counties
Our `Plan` endpoints require a zip code and a fips (county) code. This is because plan pricing requires both of these elements. Users are unlikely to know their fips code, so we provide this endpoint to look up a `ZipCounty` by zip code and return both the selected zip and fips codes.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_zip_counties_with_http_info(zip_prefix, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str zip_prefix: Partial five-digit Zip (required)
:return: ZipCountiesResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['zip_prefix']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_zip_counties" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'zip_prefix' is set
if ('zip_prefix' not in params) or (params['zip_prefix'] is None):
raise ValueError("Missing the required parameter `zip_prefix` when calling `get_zip_counties`")
collection_formats = {}
resource_path = '/zip_counties'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'zip_prefix' in params:
query_params['zip_prefix'] = params['zip_prefix']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['Vericred-Api-Key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ZipCountiesResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def show_zip_county(self, id, **kwargs):
"""
Show an individual ZipCounty
Our `Plan` endpoints require a zip code and a fips (county) code. This is because plan pricing requires both of these elements. Users are unlikely to know their fips code, so we provide this endpoint to return the details for a `ZipCounty` by zip code and return both the selected zip and fips codes.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.show_zip_county(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Unique ID for ZipCounty (required)
:return: ZipCountyResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.show_zip_county_with_http_info(id, **kwargs)
else:
(data) = self.show_zip_county_with_http_info(id, **kwargs)
return data
def show_zip_county_with_http_info(self, id, **kwargs):
"""
Show an individual ZipCounty
Our `Plan` endpoints require a zip code and a fips (county) code. This is because plan pricing requires both of these elements. Users are unlikely to know their fips code, so we provide this endpoint to return the details for a `ZipCounty` by zip code and return both the selected zip and fips codes.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.show_zip_county_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Unique ID for ZipCounty (required)
:return: ZipCountyResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method show_zip_county" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `show_zip_county`")
collection_formats = {}
resource_path = '/zip_counties/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['Vericred-Api-Key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ZipCountyResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
|
|
# The MIT License (MIT)
# Copyright (c) 2021 Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Internal methods for executing functions in the Azure Cosmos database service.
"""
import time
import asyncio
from azure.core.exceptions import AzureError, ClientAuthenticationError
from azure.core.pipeline.policies import AsyncRetryPolicy
from .. import exceptions
from ..http_constants import HttpHeaders, StatusCodes, SubStatusCodes
from .._retry_utility import _configure_timeout
from .. import _endpoint_discovery_retry_policy
from .. import _resource_throttle_retry_policy
from .. import _default_retry_policy
from .. import _session_retry_policy
from .. import _gone_retry_policy
# pylint: disable=protected-access
async def ExecuteAsync(client, global_endpoint_manager, function, *args, **kwargs):
"""Executes the function with passed parameters applying all retry policies
:param object client:
Document client instance
:param object global_endpoint_manager:
Instance of _GlobalEndpointManager class
:param function function:
Function to be called wrapped with retries
:param (non-keyworded, variable number of arguments list) *args:
:param (keyworded, variable number of arguments list) **kwargs:
"""
# instantiate all retry policies here to be applied for each request execution
endpointDiscovery_retry_policy = _endpoint_discovery_retry_policy.EndpointDiscoveryRetryPolicy(
client.connection_policy, global_endpoint_manager, *args
)
resourceThrottle_retry_policy = _resource_throttle_retry_policy.ResourceThrottleRetryPolicy(
client.connection_policy.RetryOptions.MaxRetryAttemptCount,
client.connection_policy.RetryOptions.FixedRetryIntervalInMilliseconds,
client.connection_policy.RetryOptions.MaxWaitTimeInSeconds,
)
defaultRetry_policy = _default_retry_policy.DefaultRetryPolicy(*args)
sessionRetry_policy = _session_retry_policy._SessionRetryPolicy(
client.connection_policy.EnableEndpointDiscovery, global_endpoint_manager, *args
)
partition_key_range_gone_retry_policy = _gone_retry_policy.PartitionKeyRangeGoneRetryPolicy(client, *args)
while True:
try:
client_timeout = kwargs.get('timeout')
start_time = time.time()
if args:
result = await ExecuteFunctionAsync(function, global_endpoint_manager, *args, **kwargs)
else:
result = await ExecuteFunctionAsync(function, *args, **kwargs)
if not client.last_response_headers:
client.last_response_headers = {}
# setting the throttle related response headers before returning the result
client.last_response_headers[
HttpHeaders.ThrottleRetryCount
] = resourceThrottle_retry_policy.current_retry_attempt_count
client.last_response_headers[
HttpHeaders.ThrottleRetryWaitTimeInMs
] = resourceThrottle_retry_policy.cummulative_wait_time_in_milliseconds
return result
except exceptions.CosmosHttpResponseError as e:
retry_policy = None
if e.status_code == StatusCodes.FORBIDDEN and e.sub_status == SubStatusCodes.WRITE_FORBIDDEN:
retry_policy = endpointDiscovery_retry_policy
elif e.status_code == StatusCodes.TOO_MANY_REQUESTS:
retry_policy = resourceThrottle_retry_policy
elif (
e.status_code == StatusCodes.NOT_FOUND
and e.sub_status
and e.sub_status == SubStatusCodes.READ_SESSION_NOTAVAILABLE
):
retry_policy = sessionRetry_policy
elif exceptions._partition_range_is_gone(e):
retry_policy = partition_key_range_gone_retry_policy
else:
retry_policy = defaultRetry_policy
# If none of the retry policies applies or no retry is needed, set the
# throttle-related response headers and re-raise the exception. args[0]
# is the request; it needs to be modified for the write-forbidden exception.
if not retry_policy.ShouldRetry(e):
if not client.last_response_headers:
client.last_response_headers = {}
client.last_response_headers[
HttpHeaders.ThrottleRetryCount
] = resourceThrottle_retry_policy.current_retry_attempt_count
client.last_response_headers[
HttpHeaders.ThrottleRetryWaitTimeInMs
] = resourceThrottle_retry_policy.cummulative_wait_time_in_milliseconds
if args and args[0].should_clear_session_token_on_session_read_failure:
client.session.clear_session_token(client.last_response_headers)
raise
# Wait for retry_after_in_milliseconds time before the next retry
await asyncio.sleep(retry_policy.retry_after_in_milliseconds / 1000.0)
if client_timeout:
kwargs['timeout'] = client_timeout - (time.time() - start_time)
if kwargs['timeout'] <= 0:
raise exceptions.CosmosClientTimeoutError()
async def ExecuteFunctionAsync(function, *args, **kwargs):
"""Stub method so that it can be used for mocking purposes as well.
"""
return await function(*args, **kwargs)
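# --- Illustrative sketch (not part of the SDK) --------------------------------
# The retry loop above keeps an absolute client-side timeout budget across
# attempts: after a retried attempt it recomputes kwargs['timeout'] from the
# elapsed time and raises CosmosClientTimeoutError once the budget is spent.
# The helper below mirrors that bookkeeping in isolation; the name and inputs
# are hypothetical and nothing in the SDK calls it.
def _example_timeout_budget(initial_timeout, attempt_durations):
    """Return the remaining budget after each simulated attempt."""
    remaining = initial_timeout
    budgets = []
    for elapsed in attempt_durations:
        remaining -= elapsed  # same arithmetic as the kwargs['timeout'] update above
        if remaining <= 0:
            raise exceptions.CosmosClientTimeoutError()
        budgets.append(remaining)
    return budgets
# e.g. _example_timeout_budget(10.0, [2.0, 3.0]) -> [8.0, 5.0]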
class _ConnectionRetryPolicy(AsyncRetryPolicy):
def __init__(self, **kwargs):
clean_kwargs = {k: v for k, v in kwargs.items() if v is not None}
super(_ConnectionRetryPolicy, self).__init__(**clean_kwargs)
async def send(self, request):
"""Sends the PipelineRequest object to the next policy. Uses retry settings if necessary.
Also enforces an absolute client-side timeout that spans multiple retry attempts.
:param request: The PipelineRequest object
:type request: ~azure.core.pipeline.PipelineRequest
:return: Returns the PipelineResponse or raises error if maximum retries exceeded.
:rtype: ~azure.core.pipeline.PipelineResponse
:raises ~azure.core.exceptions.AzureError: Maximum retries exceeded.
:raises ~azure.cosmos.exceptions.CosmosClientTimeoutError: Specified timeout exceeded.
:raises ~azure.core.exceptions.ClientAuthenticationError: Authentication failed.
"""
absolute_timeout = request.context.options.pop('timeout', None)
per_request_timeout = request.context.options.pop('connection_timeout', 0)
retry_error = None
retry_active = True
response = None
retry_settings = self.configure_retries(request.context.options)
while retry_active:
try:
start_time = time.time()
_configure_timeout(request, absolute_timeout, per_request_timeout)
response = await self.next.send(request)
if self.is_retry(retry_settings, response):
retry_active = self.increment(retry_settings, response=response)
if retry_active:
await self.sleep(retry_settings, request.context.transport, response=response)
continue
break
except ClientAuthenticationError: # pylint:disable=try-except-raise
# the authentication policy failed such that the client's request can't
# succeed--we'll never have a response to it, so propagate the exception
raise
except exceptions.CosmosClientTimeoutError as timeout_error:
timeout_error.inner_exception = retry_error
timeout_error.response = response
timeout_error.history = retry_settings['history']
raise
except AzureError as err:
retry_error = err
if self._is_method_retryable(retry_settings, request.http_request):
retry_active = self.increment(retry_settings, response=request, error=err)
if retry_active:
await self.sleep(retry_settings, request.context.transport)
continue
raise err
finally:
end_time = time.time()
if absolute_timeout:
absolute_timeout -= (end_time - start_time)
self.update_context(response.context, retry_settings)
return response
|
|
from django_fsm_log.admin import StateLogInline
from fsm_admin.mixins import FSMTransitionMixin
from django_object_actions import DjangoObjectActions
# Django
from django.contrib import admin
from reversion.admin import VersionAdmin
# Local
from .models import Award
from .models import Person
from .models import Group
from .models import Chart
from .models import Convention
from .tasks import update_group_from_source
admin.site.disable_action('delete_selected')
@admin.register(Award)
class AwardAdmin(VersionAdmin, FSMTransitionMixin):
fsm_field = [
'status',
]
save_on_top = True
fields = [
'id',
'name',
'status',
'kind',
'gender',
'district',
'division',
'age',
'level',
'season',
'is_single',
'is_novice',
('threshold', 'minimum', 'spots',),
'description',
'notes',
]
list_display = [
# 'district',
'name',
# 'size',
# 'scope',
'district',
'division',
'kind',
'age',
'gender',
'level',
# 'size',
# 'scope',
# 'season',
# 'rounds',
# 'threshold',
# 'advance',
# 'minimum',
'status',
]
# list_editable = [
# 'threshold',
# 'advance',
# 'minimum',
# ]
list_filter = [
'status',
'kind',
'level',
'district',
'division',
'age',
'gender',
'season',
'is_single',
'is_novice',
]
readonly_fields = [
'id',
]
search_fields = [
'name',
]
ordering = (
'tree_sort',
)
@admin.register(Chart)
class ChartAdmin(VersionAdmin, FSMTransitionMixin):
fsm_field = [
'status',
]
fields = [
'status',
'title',
'arrangers',
'composers',
'lyricists',
'holders',
'description',
'notes',
'image',
'created',
'modified',
]
list_display = [
'status',
'title',
'arrangers',
]
list_filter = [
'status',
]
readonly_fields = [
'created',
'modified',
]
search_fields = [
'title',
'arrangers',
]
ordering = [
'title',
'arrangers',
]
@admin.register(Convention)
class ConventionAdmin(VersionAdmin, FSMTransitionMixin):
fields = (
'id',
# 'legacy_selection',
# 'legacy_complete',
'status',
'name',
('district', 'divisions', ),
('year', 'season', ),
('panel', 'kinds', ),
('open_date', 'close_date', ),
('start_date', 'end_date', ),
'owners',
'venue_name',
'location',
'timezone',
'image',
'persons',
'description',
)
list_display = (
'year',
'district',
'season',
'divisions',
'name',
'location',
# 'timezone',
'start_date',
'end_date',
# 'status',
)
list_editable = [
'name',
# 'location',
# 'start_date',
# 'end_date',
]
list_filter = (
'status',
'season',
'district',
'year',
)
fsm_field = [
'status',
]
search_fields = [
'name',
]
inlines = [
StateLogInline,
]
readonly_fields = (
'id',
)
autocomplete_fields = [
'persons',
'owners',
]
ordering = [
'-year',
'season',
'district',
]
list_select_related = [
]
save_on_top = True
@admin.register(Group)
class GroupAdmin(DjangoObjectActions, VersionAdmin, FSMTransitionMixin):
save_on_top = True
fsm_field = [
'status',
]
fieldsets = (
(None, {
'fields': (
'id',
'status',
),
}),
('Group Info (primarily from Member Center)', {
'fields': (
'name',
'kind',
'gender',
'district',
'division',
'bhs_id',
'code',
'is_senior',
'is_youth',
),
}),
('Group Info (expanded)', {
'fields': (
'description',
'image',
),
}),
('Repertory', {
'fields': (
'charts',
),
}),
('Misc', {
'fields': (
'source_id',
'owners',
'location',
'website',
'notes',
'created',
'modified',
),
}),
)
list_filter = [
'status',
'kind',
'gender',
'is_senior',
'is_youth',
'district',
'division',
]
search_fields = [
'name',
'bhs_id',
'code',
'owners__email',
]
list_display = [
'name',
'kind',
'gender',
'district',
'division',
'bhs_id',
'code',
'status',
]
list_select_related = [
]
readonly_fields = [
'id',
'created',
'modified',
]
autocomplete_fields = [
'owners',
'charts',
]
raw_id_fields = [
]
ordering = [
'kind',
'name',
]
INLINES = {
'International': [
# AwardInline,
# OfficerInline,
# ConventionInline,
StateLogInline,
],
'District': [
# AwardInline,
# OfficerInline,
# ConventionInline,
# ActiveChapterInline,
# ActiveQuartetInline,
StateLogInline,
],
'Noncompetitive': [
# OfficerInline,
# GroupInline,
StateLogInline,
],
'Affiliate': [
# OfficerInline,
# GroupInline,
StateLogInline,
],
'Chapter': [
# ActiveChorusInline,
# OfficerInline,
StateLogInline,
],
'Chorus': [
# MemberInline,
# RepertoryInline,
# EntryInline,
StateLogInline,
],
'Quartet': [
# MemberInline,
# RepertoryInline,
# EntryInline,
StateLogInline,
],
'VLQ': [
# MemberInline,
# RepertoryInline,
# EntryInline,
StateLogInline,
],
}
def get_inline_instances(self, request, obj=None):
inline_instances = []
try:
inlines = self.INLINES[obj.KIND[obj.kind]]
except AttributeError:
return inline_instances
for inline_class in inlines:
inline = inline_class(self.model, self.admin_site)
inline_instances.append(inline)
return inline_instances
def get_formsets(self, request, obj=None):
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj)
def get_queryset(self, request):
return super().get_queryset(
request
)
# ).prefetch_related('members')
def update_from_source(self, request, obj):
return update_group_from_source(obj)
update_from_source.label = "Update"
update_from_source.short_description = "Update from Source Database"
change_actions = ('update_from_source', )
@admin.register(Person)
class PersonAdmin(VersionAdmin, FSMTransitionMixin):
fields = [
'id',
'status',
('name', 'first_name', 'last_name',),
('email', 'bhs_id',),
('home_phone', 'work_phone', 'cell_phone',),
('part', 'gender',),
'district',
'source_id',
'image',
'description',
'notes',
('created', 'modified',),
'owners',
]
list_display = [
'name',
# 'district',
'email',
# 'cell_phone',
# 'part',
# 'gender',
# 'bhs_id',
]
list_filter = [
'status',
'district',
'gender',
'part',
]
readonly_fields = [
'id',
'created',
'modified',
]
fsm_field = [
'status',
]
search_fields = [
'name',
'last_name',
'first_name',
'bhs_id',
'email',
'bhs_id',
]
autocomplete_fields = [
'owners',
]
save_on_top = True
inlines = [
]
ordering = [
'last_name',
'first_name',
]
|
|
#!/usr/bin/env python
'''
MOSSE tracking sample
This sample implements the correlation-based tracking approach described in [1].
Usage:
mosse.py [--pause] [--picamera N]
--pause - start with playback paused at the first video frame.
Useful for tracking target selection.
--picamera N - pass 1 to use the Raspberry Pi camera module (default: webcam)
Draw rectangles around objects with a mouse to track them.
Keys:
SPACE - pause video
c - clear targets
[1] David S. Bolme et al. "Visual Object Tracking using Adaptive Correlation Filters"
http://www.cs.colostate.edu/~bolme/publications/Bolme2010Tracking.pdf
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
import argparse
import imutils
import cv2
import time
import numpy as np
from imutils.video import VideoStream
from common import draw_str, RectSelector
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
def rnd_warp(a):
h, w = a.shape[:2]
T = np.zeros((2, 3))
coef = 0.2
ang = (np.random.rand()-0.5)*coef
c, s = np.cos(ang), np.sin(ang)
T[:2, :2] = [[c,-s], [s, c]]
T[:2, :2] += (np.random.rand(2, 2) - 0.5)*coef
c = (w/2, h/2)
T[:,2] = c - np.dot(T[:2, :2], c)
return cv2.warpAffine(a, T, (w, h), borderMode = cv2.BORDER_REFLECT)
def divSpec(A, B):
Ar, Ai = A[...,0], A[...,1]
Br, Bi = B[...,0], B[...,1]
C = (Ar+1j*Ai)/(Br+1j*Bi)
C = np.dstack([np.real(C), np.imag(C)]).copy()
return C
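# --- Illustrative sketch (not part of the sample) ------------------------------
# divSpec() above divides two spectra stored in OpenCV's packed two-channel
# (real, imag) layout. The check below, with made-up constant inputs, shows the
# result matches plain complex division in numpy; it is never called by the
# tracker itself.
def _example_divspec_check():
    A = np.dstack([np.full((2, 2), 3.0), np.full((2, 2), 1.0)])  # 3 + 1j
    B = np.dstack([np.full((2, 2), 1.0), np.full((2, 2), 2.0)])  # 1 + 2j
    C = divSpec(A, B)
    expected = (3 + 1j) / (1 + 2j)  # 1 - 1j
    return np.allclose(C[..., 0], expected.real) and np.allclose(C[..., 1], expected.imag)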
eps = 1e-5
class MOSSE:
def __init__(self, frame, rect):
x1, y1, x2, y2 = rect
w, h = map(cv2.getOptimalDFTSize, [x2-x1, y2-y1])
x1, y1 = (x1+x2-w)//2, (y1+y2-h)//2
self.pos = x, y = x1+0.5*(w-1), y1+0.5*(h-1)
self.size = w, h
img = cv2.getRectSubPix(frame, (w, h), (x, y))
self.win = cv2.createHanningWindow((w, h), cv2.CV_32F)
g = np.zeros((h, w), np.float32)
g[h//2, w//2] = 1
g = cv2.GaussianBlur(g, (-1, -1), 2.0)
g /= g.max()
self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT)
self.H1 = np.zeros_like(self.G)
self.H2 = np.zeros_like(self.G)
for i in xrange(128):
a = self.preprocess(rnd_warp(img))
A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT)
self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)
self.H2 += cv2.mulSpectrums( A, A, 0, conjB=True)
self.update_kernel()
self.update(frame)
def update(self, frame, rate = 0.125):
(x, y), (w, h) = self.pos, self.size
self.last_img = img = cv2.getRectSubPix(frame, (w, h), (x, y))
img = self.preprocess(img)
self.last_resp, (dx, dy), self.psr = self.correlate(img)
self.good = self.psr > 8.0
if not self.good:
return
self.pos = x+dx, y+dy
self.last_img = img = cv2.getRectSubPix(frame, (w, h), self.pos)
img = self.preprocess(img)
A = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)
H1 = cv2.mulSpectrums(self.G, A, 0, conjB=True)
H2 = cv2.mulSpectrums( A, A, 0, conjB=True)
self.H1 = self.H1 * (1.0-rate) + H1 * rate
self.H2 = self.H2 * (1.0-rate) + H2 * rate
self.update_kernel()
@property
def state_vis(self):
f = cv2.idft(self.H, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT )
h, w = f.shape
f = np.roll(f, -h//2, 0)
f = np.roll(f, -w//2, 1)
kernel = np.uint8( (f-f.min()) / f.ptp()*255 )
resp = self.last_resp
resp = np.uint8(np.clip(resp/resp.max(), 0, 1)*255)
vis = np.hstack([self.last_img, kernel, resp])
return vis
def draw_state(self, vis):
(x, y), (w, h) = self.pos, self.size
x1, y1, x2, y2 = int(x-0.5*w), int(y-0.5*h), int(x+0.5*w), int(y+0.5*h)
cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 0, 255))
if self.good:
cv2.circle(vis, (int(x), int(y)), 2, (0, 0, 255), -1)
else:
cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255))
cv2.line(vis, (x2, y1), (x1, y2), (0, 0, 255))
draw_str(vis, (x1, y2+16), 'PSR: %.2f' % self.psr)
def preprocess(self, img):
img = np.log(np.float32(img)+1.0)
img = (img-img.mean()) / (img.std()+eps)
return img*self.win
def correlate(self, img):
C = cv2.mulSpectrums(cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True)
resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
h, w = resp.shape
_, mval, _, (mx, my) = cv2.minMaxLoc(resp)
side_resp = resp.copy()
cv2.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1)
smean, sstd = side_resp.mean(), side_resp.std()
psr = (mval-smean) / (sstd+eps)
return resp, (mx-w//2, my-h//2), psr
def update_kernel(self):
self.H = divSpec(self.H1, self.H2)
self.H[...,1] *= -1
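# --- Illustrative sketch (not part of the sample) ------------------------------
# update() above maintains the MOSSE filter as a rate-weighted running average of
# a numerator H1 (accumulated from G * conj(A)) and a denominator H2 (accumulated
# from A * conj(A)); update_kernel() then divides them (and conjugates the result
# for use with cv2.mulSpectrums). The helper below reproduces that update with
# plain numpy complex arrays instead of cv2's two-channel spectra; the names are
# made up for the example and the tracker never calls it.
def _example_mosse_update(H1, H2, G, A, rate=0.125):
    """Blend one new frame spectrum A into the running filter (H1, H2)."""
    H1_new = H1 * (1.0 - rate) + (G * np.conj(A)) * rate
    H2_new = H2 * (1.0 - rate) + (A * np.conj(A)) * rate
    H = H1_new / (H2_new + eps)  # eps added here only to avoid division by zero
    return H1_new, H2_new, H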
class App:
def __init__(self, cap, paused = False):
self.cap = cap
self.frame = self.cap.read()
print(self.frame.shape)
cv2.imshow('frame', self.frame)
self.rect_sel = RectSelector('frame', self.onrect)
self.trackers = []
self.paused = paused
def onrect(self, rect):
frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
tracker = MOSSE(frame_gray, rect)
self.trackers.append(tracker)
def run(self):
# Define the codec and create VideoWriter object
#fourcc = cv2.VideoWriter_fourcc(*'DIVX')
#the spec is for my webcam, the IP camera is 1920x1080
#out = cv2.VideoWriter('output.avi',fourcc, 30.0, (1920,1080))
while True:
if not self.paused:
self.frame = self.cap.read()
frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
for tracker in self.trackers:
tracker.update(frame_gray)
vis = self.frame.copy()
for tracker in self.trackers:
tracker.draw_state(vis)
if len(self.trackers) > 0:
cv2.imshow('tracker state', self.trackers[-1].state_vis)
self.rect_sel.draw(vis)
cv2.imshow('frame', vis)
#out.write(vis)
ch = cv2.waitKey(10) & 0xFF
if ch == 27:
#out.release()
cv2.destroyAllWindows()
print("released out")
break
if ch == ord(' '):
self.paused = not self.paused
if ch == ord('c'):
self.trackers = []
if __name__ == '__main__':
print (__doc__)
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
help="whether or not the Raspberry Pi camera should be used")
ap.add_argument("--pause", type=bool, default=False,
help="stop on first frame")
args = vars(ap.parse_args())
cap = VideoStream(usePiCamera=args["picamera"] > 0).start()
print("letting camera warm up")
time.sleep(2.0)
App(cap, paused = args["pause"]).run()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Frontends for the message extraction functionality."""
try:
from ConfigParser import RawConfigParser
except ImportError:
from configparser import RawConfigParser
from datetime import datetime
from distutils import log
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from locale import getpreferredencoding
import logging
from optparse import OptionParser
import os
import re
import shutil
import sys
import tempfile
from babel import __version__ as VERSION
from babel import Locale, localedata
from babel.core import UnknownLocaleError
from babel.messages.catalog import Catalog
from babel.messages.extract import extract_from_dir, DEFAULT_KEYWORDS, \
DEFAULT_MAPPING
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po, write_po
from babel.util import odict, LOCALTZ
from babel._compat import string_types, BytesIO
__all__ = ['CommandLineInterface', 'compile_catalog', 'extract_messages',
'init_catalog', 'check_message_extractors', 'update_catalog']
class compile_catalog(Command):
"""Catalog compilation command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import compile_catalog
setup(
...
cmdclass = {'compile_catalog': compile_catalog}
)
:since: version 0.9
:see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
:see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
"""
description = 'compile message catalogs to binary MO files'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('directory=', 'd',
'path to base directory containing the catalogs'),
('input-file=', 'i',
'name of the input file'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
('locale=', 'l',
'locale of the catalog to compile'),
('use-fuzzy', 'f',
'also include fuzzy translations'),
('statistics', None,
'print statistics about translations')
]
boolean_options = ['use-fuzzy', 'statistics']
def initialize_options(self):
self.domain = 'messages'
self.directory = None
self.input_file = None
self.output_file = None
self.locale = None
self.use_fuzzy = False
self.statistics = False
def finalize_options(self):
if not self.input_file and not self.directory:
raise DistutilsOptionError('you must specify either the input file '
'or the base directory')
if not self.output_file and not self.directory:
raise DistutilsOptionError('you must specify either the input file '
'or the base directory')
def run(self):
po_files = []
mo_files = []
if not self.input_file:
if self.locale:
po_files.append((self.locale,
os.path.join(self.directory, self.locale,
'LC_MESSAGES',
self.domain + '.po')))
mo_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
self.domain + '.mo'))
else:
for locale in os.listdir(self.directory):
po_file = os.path.join(self.directory, locale,
'LC_MESSAGES', self.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
mo_files.append(os.path.join(self.directory, locale,
'LC_MESSAGES',
self.domain + '.mo'))
else:
po_files.append((self.locale, self.input_file))
if self.output_file:
mo_files.append(self.output_file)
else:
mo_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
self.domain + '.mo'))
if not po_files:
raise DistutilsOptionError('no message catalogs found')
for idx, (locale, po_file) in enumerate(po_files):
mo_file = mo_files[idx]
infile = open(po_file, 'r')
try:
catalog = read_po(infile, locale)
finally:
infile.close()
if self.statistics:
translated = 0
for message in list(catalog)[1:]:
if message.string:
translated +=1
percentage = 0
if len(catalog):
percentage = translated * 100 // len(catalog)
log.info('%d of %d messages (%d%%) translated in %r',
translated, len(catalog), percentage, po_file)
if catalog.fuzzy and not self.use_fuzzy:
log.warn('catalog %r is marked as fuzzy, skipping', po_file)
continue
for message, errors in catalog.check():
for error in errors:
log.error('error: %s:%d: %s', po_file, message.lineno,
error)
log.info('compiling catalog %r to %r', po_file, mo_file)
outfile = open(mo_file, 'wb')
try:
write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
finally:
outfile.close()
class extract_messages(Command):
"""Message extraction command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import extract_messages
setup(
...
cmdclass = {'extract_messages': extract_messages}
)
:see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
:see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
"""
description = 'extract localizable strings from the project code'
user_options = [
('charset=', None,
'charset to use in the output file'),
('keywords=', 'k',
'space-separated list of keywords to look for in addition to the '
'defaults'),
('no-default-keywords', None,
'do not include the default keywords'),
('mapping-file=', 'F',
'path to the mapping configuration file'),
('no-location', None,
'do not include location comments with filename and line number'),
('omit-header', None,
'do not include msgid "" entry in header'),
('output-file=', 'o',
'name of the output file'),
('width=', 'w',
'set output line width (default 76)'),
('no-wrap', None,
'do not break long message lines, longer than the output line width, '
'into several lines'),
('sort-output', None,
'generate sorted output (default False)'),
('sort-by-file', None,
'sort output by file location (default False)'),
('msgid-bugs-address=', None,
'set report address for msgid'),
('copyright-holder=', None,
'set copyright holder in output'),
('add-comments=', 'c',
'place comment block with TAG (or those preceding keyword lines) in '
'output file. Separate multiple TAGs with commas(,)'),
('strip-comments', None,
'strip the comment TAGs from the comments.'),
('input-dirs=', None,
'directories that should be scanned for messages. Separate multiple '
'directories with commas(,)'),
]
boolean_options = [
'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
'sort-output', 'sort-by-file', 'strip-comments'
]
def initialize_options(self):
self.charset = 'utf-8'
self.keywords = ''
self._keywords = DEFAULT_KEYWORDS.copy()
self.no_default_keywords = False
self.mapping_file = None
self.no_location = False
self.omit_header = False
self.output_file = None
self.input_dirs = None
self.width = None
self.no_wrap = False
self.sort_output = False
self.sort_by_file = False
self.msgid_bugs_address = None
self.copyright_holder = None
self.add_comments = None
self._add_comments = []
self.strip_comments = False
def finalize_options(self):
if self.no_default_keywords and not self.keywords:
raise DistutilsOptionError('you must specify new keywords if you '
'disable the default ones')
if self.no_default_keywords:
self._keywords = {}
if self.keywords:
self._keywords.update(parse_keywords(self.keywords.split()))
if not self.output_file:
raise DistutilsOptionError('no output file specified')
if self.no_wrap and self.width:
raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
"exclusive")
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
if self.sort_output and self.sort_by_file:
raise DistutilsOptionError("'--sort-output' and '--sort-by-file' "
"are mutually exclusive")
if self.input_dirs:
self.input_dirs = re.split(r',\s*', self.input_dirs)
else:
self.input_dirs = dict.fromkeys([k.split('.',1)[0]
for k in self.distribution.packages
]).keys()
if self.add_comments:
self._add_comments = self.add_comments.split(',')
def run(self):
mappings = self._get_mappings()
outfile = open(self.output_file, 'wb')
try:
catalog = Catalog(project=self.distribution.get_name(),
version=self.distribution.get_version(),
msgid_bugs_address=self.msgid_bugs_address,
copyright_holder=self.copyright_holder,
charset=self.charset)
for dirname, (method_map, options_map) in mappings.items():
def callback(filename, method, options):
if method == 'ignore':
return
filepath = os.path.normpath(os.path.join(dirname, filename))
optstr = ''
if options:
optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
k, v in options.items()])
log.info('extracting messages from %s%s', filepath, optstr)
extracted = extract_from_dir(dirname, method_map, options_map,
keywords=self._keywords,
comment_tags=self._add_comments,
callback=callback,
strip_comment_tags=
self.strip_comments)
for filename, lineno, message, comments, context in extracted:
filepath = os.path.normpath(os.path.join(dirname, filename))
catalog.add(message, None, [(filepath, lineno)],
auto_comments=comments, context=context)
log.info('writing PO template file to %s' % self.output_file)
write_po(outfile, catalog, width=self.width,
no_location=self.no_location,
omit_header=self.omit_header,
sort_output=self.sort_output,
sort_by_file=self.sort_by_file)
finally:
outfile.close()
def _get_mappings(self):
mappings = {}
if self.mapping_file:
fileobj = open(self.mapping_file, 'U')
try:
method_map, options_map = parse_mapping(fileobj)
for dirname in self.input_dirs:
mappings[dirname] = method_map, options_map
finally:
fileobj.close()
elif getattr(self.distribution, 'message_extractors', None):
message_extractors = self.distribution.message_extractors
for dirname, mapping in message_extractors.items():
if isinstance(mapping, string_types):
method_map, options_map = parse_mapping(BytesIO(mapping))
else:
method_map, options_map = [], {}
for pattern, method, options in mapping:
method_map.append((pattern, method))
options_map[pattern] = options or {}
mappings[dirname] = method_map, options_map
else:
for dirname in self.input_dirs:
mappings[dirname] = DEFAULT_MAPPING, {}
return mappings
def check_message_extractors(dist, name, value):
"""Validate the ``message_extractors`` keyword argument to ``setup()``.
:param dist: the distutils/setuptools ``Distribution`` object
:param name: the name of the keyword argument (should always be
"message_extractors")
:param value: the value of the keyword argument
:raise `DistutilsSetupError`: if the value is not valid
:see: `Adding setup() arguments
<http://peak.telecommunity.com/DevCenter/setuptools#adding-setup-arguments>`_
"""
assert name == 'message_extractors'
if not isinstance(value, dict):
raise DistutilsSetupError('the value of the "message_extractors" '
'parameter must be a dictionary')
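# Illustrative sketch (not part of Babel): a ``message_extractors`` value of the
# shape that check_message_extractors() minimally validates and that
# extract_messages._get_mappings() above turns into method and options maps; a
# dict mapping a source directory to a list of (pattern, method, options) tuples
# (or to a mapping-file string). The package and pattern names are made up.
_EXAMPLE_MESSAGE_EXTRACTORS = {
    'mypackage': [
        ('**.py', 'python', None),
        ('**/templates/**.html', 'genshi', {'encoding': 'utf-8'}),
    ],
}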
class init_catalog(Command):
"""New catalog initialization command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import init_catalog
setup(
...
cmdclass = {'init_catalog': init_catalog}
)
:see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
:see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
"""
description = 'create a new catalog based on a POT file'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-file=', 'i',
'name of the input file'),
('output-dir=', 'd',
'path to output directory'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
('locale=', 'l',
'locale for the new localized catalog'),
('width=', 'w',
'set output line width (default 76)'),
('no-wrap', None,
'do not break long message lines, longer than the output line width, '
'into several lines'),
]
boolean_options = ['no-wrap']
def initialize_options(self):
self.output_dir = None
self.output_file = None
self.input_file = None
self.locale = None
self.domain = 'messages'
self.no_wrap = False
self.width = None
def finalize_options(self):
if not self.input_file:
raise DistutilsOptionError('you must specify the input file')
if not self.locale:
raise DistutilsOptionError('you must provide a locale for the '
'new catalog')
try:
self._locale = Locale.parse(self.locale)
except UnknownLocaleError as e:
raise DistutilsOptionError(e)
if not self.output_file and not self.output_dir:
raise DistutilsOptionError('you must specify the output directory')
if not self.output_file:
self.output_file = os.path.join(self.output_dir, self.locale,
'LC_MESSAGES', self.domain + '.po')
if not os.path.exists(os.path.dirname(self.output_file)):
os.makedirs(os.path.dirname(self.output_file))
if self.no_wrap and self.width:
raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
"exclusive")
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
def run(self):
log.info('creating catalog %r based on %r', self.output_file,
self.input_file)
infile = open(self.input_file, 'r')
try:
# Although reading from the catalog template, read_po must be fed
# the locale in order to correctly calculate plurals
catalog = read_po(infile, locale=self.locale)
finally:
infile.close()
catalog.locale = self._locale
catalog.revision_date = datetime.now(LOCALTZ)
catalog.fuzzy = False
outfile = open(self.output_file, 'wb')
try:
write_po(outfile, catalog, width=self.width)
finally:
outfile.close()
class update_catalog(Command):
"""Catalog merging command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import update_catalog
setup(
...
cmdclass = {'update_catalog': update_catalog}
)
:since: version 0.9
:see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
:see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
"""
description = 'update message catalogs from a POT file'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-file=', 'i',
'name of the input file'),
('output-dir=', 'd',
'path to base directory containing the catalogs'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
('locale=', 'l',
'locale of the catalog to compile'),
('width=', 'w',
'set output line width (default 76)'),
('no-wrap', None,
'do not break long message lines, longer than the output line width, '
'into several lines'),
('ignore-obsolete', None,
'whether to omit obsolete messages from the output'),
('no-fuzzy-matching', 'N',
'do not use fuzzy matching'),
('previous', None,
'keep previous msgids of translated messages')
]
boolean_options = ['ignore-obsolete', 'no-fuzzy-matching', 'previous']
def initialize_options(self):
self.domain = 'messages'
self.input_file = None
self.output_dir = None
self.output_file = None
self.locale = None
self.width = None
self.no_wrap = False
self.ignore_obsolete = False
self.no_fuzzy_matching = False
self.previous = False
def finalize_options(self):
if not self.input_file:
raise DistutilsOptionError('you must specify the input file')
if not self.output_file and not self.output_dir:
raise DistutilsOptionError('you must specify the output file or '
'directory')
if self.output_file and not self.locale:
raise DistutilsOptionError('you must specify the locale')
if self.no_wrap and self.width:
raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
"exclusive")
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
if self.no_fuzzy_matching and self.previous:
self.previous = False
def run(self):
po_files = []
if not self.output_file:
if self.locale:
po_files.append((self.locale,
os.path.join(self.output_dir, self.locale,
'LC_MESSAGES',
self.domain + '.po')))
else:
for locale in os.listdir(self.output_dir):
po_file = os.path.join(self.output_dir, locale,
'LC_MESSAGES',
self.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
else:
po_files.append((self.locale, self.output_file))
domain = self.domain
if not domain:
domain = os.path.splitext(os.path.basename(self.input_file))[0]
infile = open(self.input_file, 'U')
try:
template = read_po(infile)
finally:
infile.close()
if not po_files:
raise DistutilsOptionError('no message catalogs found')
for locale, filename in po_files:
log.info('updating catalog %r based on %r', filename,
self.input_file)
infile = open(filename, 'U')
try:
catalog = read_po(infile, locale=locale, domain=domain)
finally:
infile.close()
catalog.update(template, self.no_fuzzy_matching)
tmpname = os.path.join(os.path.dirname(filename),
tempfile.gettempprefix() +
os.path.basename(filename))
tmpfile = open(tmpname, 'w')
try:
try:
write_po(tmpfile, catalog,
ignore_obsolete=self.ignore_obsolete,
include_previous=self.previous, width=self.width)
finally:
tmpfile.close()
except:
os.remove(tmpname)
raise
try:
os.rename(tmpname, filename)
except OSError:
# We're probably on Windows, which doesn't support atomic
# renames, at least not through Python
# If the error is in fact due to a permissions problem, that
# same error is going to be raised from one of the following
# operations
os.remove(filename)
shutil.copy(tmpname, filename)
os.remove(tmpname)
class CommandLineInterface(object):
"""Command-line interface.
This class provides a simple command-line interface to the message
extraction and PO file generation functionality.
"""
usage = '%%prog %s [options] %s'
version = '%%prog %s' % VERSION
commands = {
'compile': 'compile message catalogs to MO files',
'extract': 'extract messages from source files and generate a POT file',
'init': 'create new message catalogs from a POT file',
'update': 'update existing message catalogs from a POT file'
}
def run(self, argv=sys.argv):
"""Main entry point of the command-line interface.
:param argv: list of arguments passed on the command-line
"""
self.parser = OptionParser(usage=self.usage % ('command', '[args]'),
version=self.version)
self.parser.disable_interspersed_args()
self.parser.print_help = self._help
self.parser.add_option('--list-locales', dest='list_locales',
action='store_true',
help="print all known locales and exit")
self.parser.add_option('-v', '--verbose', action='store_const',
dest='loglevel', const=logging.DEBUG,
help='print as much as possible')
self.parser.add_option('-q', '--quiet', action='store_const',
dest='loglevel', const=logging.ERROR,
help='print as little as possible')
self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
options, args = self.parser.parse_args(argv[1:])
self._configure_logging(options.loglevel)
if options.list_locales:
identifiers = localedata.locale_identifiers()
longest = max([len(identifier) for identifier in identifiers])
identifiers.sort()
format = u'%%-%ds %%s' % (longest + 1)
for identifier in identifiers:
locale = Locale.parse(identifier)
output = format % (identifier, locale.english_name)
print(output.encode(sys.stdout.encoding or
getpreferredencoding() or
'ascii', 'replace'))
return 0
if not args:
self.parser.error('no valid command or option passed. '
'Try the -h/--help option for more information.')
cmdname = args[0]
if cmdname not in self.commands:
self.parser.error('unknown command "%s"' % cmdname)
return getattr(self, cmdname)(args[1:])
def _configure_logging(self, loglevel):
self.log = logging.getLogger('babel')
self.log.setLevel(loglevel)
# Don't add a new handler for every instance initialization (#227); this
# would cause duplicated output when the CommandLineInterface is used as a
# normal Python class and instantiated multiple times.
if self.log.handlers:
handler = self.log.handlers[0]
else:
handler = logging.StreamHandler()
self.log.addHandler(handler)
handler.setLevel(loglevel)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
def _help(self):
print(self.parser.format_help())
print("commands:")
longest = max([len(command) for command in self.commands])
format = " %%-%ds %%s" % max(8, longest + 1)
commands = sorted(self.commands.items())
for name, description in commands:
print(format % (name, description))
def compile(self, argv):
"""Subcommand for compiling a message catalog to a MO file.
:param argv: the command arguments
:since: version 0.9
"""
parser = OptionParser(usage=self.usage % ('compile', ''),
description=self.commands['compile'])
parser.add_option('--domain', '-D', dest='domain',
help="domain of MO and PO files (default '%default')")
parser.add_option('--directory', '-d', dest='directory',
metavar='DIR', help='base directory of catalog files')
parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
help='locale of the catalog')
parser.add_option('--input-file', '-i', dest='input_file',
metavar='FILE', help='name of the input file')
parser.add_option('--output-file', '-o', dest='output_file',
metavar='FILE',
help="name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/"
"<domain>.mo')")
parser.add_option('--use-fuzzy', '-f', dest='use_fuzzy',
action='store_true',
help='also include fuzzy translations (default '
'%default)')
parser.add_option('--statistics', dest='statistics',
action='store_true',
help='print statistics about translations')
parser.set_defaults(domain='messages', use_fuzzy=False,
compile_all=False, statistics=False)
options, args = parser.parse_args(argv)
po_files = []
mo_files = []
if not options.input_file:
if not options.directory:
parser.error('you must specify either the input file or the '
'base directory')
if options.locale:
po_files.append((options.locale,
os.path.join(options.directory,
options.locale, 'LC_MESSAGES',
options.domain + '.po')))
mo_files.append(os.path.join(options.directory, options.locale,
'LC_MESSAGES',
options.domain + '.mo'))
else:
for locale in os.listdir(options.directory):
po_file = os.path.join(options.directory, locale,
'LC_MESSAGES', options.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
mo_files.append(os.path.join(options.directory, locale,
'LC_MESSAGES',
options.domain + '.mo'))
else:
po_files.append((options.locale, options.input_file))
if options.output_file:
mo_files.append(options.output_file)
else:
if not options.directory:
parser.error('you must specify either the input file or '
'the base directory')
mo_files.append(os.path.join(options.directory, options.locale,
'LC_MESSAGES',
options.domain + '.mo'))
if not po_files:
parser.error('no message catalogs found')
for idx, (locale, po_file) in enumerate(po_files):
mo_file = mo_files[idx]
infile = open(po_file, 'r')
try:
catalog = read_po(infile, locale)
finally:
infile.close()
if options.statistics:
translated = 0
for message in list(catalog)[1:]:
if message.string:
translated +=1
percentage = 0
if len(catalog):
percentage = translated * 100 // len(catalog)
self.log.info("%d of %d messages (%d%%) translated in %r",
translated, len(catalog), percentage, po_file)
if catalog.fuzzy and not options.use_fuzzy:
self.log.warning('catalog %r is marked as fuzzy, skipping',
po_file)
continue
for message, errors in catalog.check():
for error in errors:
self.log.error('error: %s:%d: %s', po_file, message.lineno,
error)
self.log.info('compiling catalog %r to %r', po_file, mo_file)
outfile = open(mo_file, 'wb')
try:
write_mo(outfile, catalog, use_fuzzy=options.use_fuzzy)
finally:
outfile.close()
def extract(self, argv):
"""Subcommand for extracting messages from source files and generating
a POT file.
:param argv: the command arguments
"""
parser = OptionParser(usage=self.usage % ('extract', 'dir1 <dir2> ...'),
description=self.commands['extract'])
parser.add_option('--charset', dest='charset',
help='charset to use in the output (default '
'"%default")')
parser.add_option('-k', '--keyword', dest='keywords', action='append',
help='keywords to look for in addition to the '
'defaults. You can specify multiple -k flags on '
'the command line.')
parser.add_option('--no-default-keywords', dest='no_default_keywords',
action='store_true',
help="do not include the default keywords")
parser.add_option('--mapping', '-F', dest='mapping_file',
help='path to the extraction mapping file')
parser.add_option('--no-location', dest='no_location',
action='store_true',
help='do not include location comments with filename '
'and line number')
parser.add_option('--omit-header', dest='omit_header',
action='store_true',
help='do not include msgid "" entry in header')
parser.add_option('-o', '--output', dest='output',
help='path to the output POT file')
parser.add_option('-w', '--width', dest='width', type='int',
help="set output line width (default 76)")
parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true',
help='do not break long message lines, longer than '
'the output line width, into several lines')
parser.add_option('--sort-output', dest='sort_output',
action='store_true',
help='generate sorted output (default False)')
parser.add_option('--sort-by-file', dest='sort_by_file',
action='store_true',
help='sort output by file location (default False)')
parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address',
metavar='EMAIL@ADDRESS',
help='set report address for msgid')
parser.add_option('--copyright-holder', dest='copyright_holder',
help='set copyright holder in output')
parser.add_option('--project', dest='project',
help='set project name in output')
parser.add_option('--version', dest='version',
help='set project version in output')
parser.add_option('--add-comments', '-c', dest='comment_tags',
metavar='TAG', action='append',
help='place comment block with TAG (or those '
'preceding keyword lines) in output file. One '
'TAG per argument call')
parser.add_option('--strip-comment-tags', '-s',
dest='strip_comment_tags', action='store_true',
help='Strip the comment tags from the comments.')
parser.set_defaults(charset='utf-8', keywords=[],
no_default_keywords=False, no_location=False,
omit_header = False, width=None, no_wrap=False,
sort_output=False, sort_by_file=False,
comment_tags=[], strip_comment_tags=False)
options, args = parser.parse_args(argv)
if not args:
parser.error('incorrect number of arguments')
keywords = DEFAULT_KEYWORDS.copy()
if options.no_default_keywords:
if not options.keywords:
parser.error('you must specify new keywords if you disable the '
'default ones')
keywords = {}
if options.keywords:
keywords.update(parse_keywords(options.keywords))
if options.mapping_file:
fileobj = open(options.mapping_file, 'U')
try:
method_map, options_map = parse_mapping(fileobj)
finally:
fileobj.close()
else:
method_map = DEFAULT_MAPPING
options_map = {}
if options.width and options.no_wrap:
parser.error("'--no-wrap' and '--width' are mutually exclusive.")
elif not options.width and not options.no_wrap:
options.width = 76
if options.sort_output and options.sort_by_file:
parser.error("'--sort-output' and '--sort-by-file' are mutually "
"exclusive")
catalog = Catalog(project=options.project,
version=options.version,
msgid_bugs_address=options.msgid_bugs_address,
copyright_holder=options.copyright_holder,
charset=options.charset)
for dirname in args:
if not os.path.isdir(dirname):
parser.error('%r is not a directory' % dirname)
def callback(filename, method, options):
if method == 'ignore':
return
filepath = os.path.normpath(os.path.join(dirname, filename))
optstr = ''
if options:
optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
k, v in options.items()])
self.log.info('extracting messages from %s%s', filepath,
optstr)
extracted = extract_from_dir(dirname, method_map, options_map,
keywords, options.comment_tags,
callback=callback,
strip_comment_tags=
options.strip_comment_tags)
for filename, lineno, message, comments, context in extracted:
filepath = os.path.normpath(os.path.join(dirname, filename))
catalog.add(message, None, [(filepath, lineno)],
auto_comments=comments, context=context)
if options.output not in (None, '-'):
self.log.info('writing PO template file to %s' % options.output)
outfile = open(options.output, 'wb')
close_output = True
else:
outfile = sys.stdout
close_output = False
try:
write_po(outfile, catalog, width=options.width,
no_location=options.no_location,
omit_header=options.omit_header,
sort_output=options.sort_output,
sort_by_file=options.sort_by_file)
finally:
if close_output:
outfile.close()
def init(self, argv):
"""Subcommand for creating new message catalogs from a template.
:param argv: the command arguments
"""
parser = OptionParser(usage=self.usage % ('init', ''),
description=self.commands['init'])
parser.add_option('--domain', '-D', dest='domain',
help="domain of PO file (default '%default')")
parser.add_option('--input-file', '-i', dest='input_file',
metavar='FILE', help='name of the input file')
parser.add_option('--output-dir', '-d', dest='output_dir',
metavar='DIR', help='path to output directory')
parser.add_option('--output-file', '-o', dest='output_file',
metavar='FILE',
help="name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/"
"<domain>.po')")
parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
help='locale for the new localized catalog')
parser.add_option('-w', '--width', dest='width', type='int',
help="set output line width (default 76)")
parser.add_option('--no-wrap', dest='no_wrap', action='store_true',
help='do not break long message lines, longer than '
'the output line width, into several lines')
parser.set_defaults(domain='messages')
options, args = parser.parse_args(argv)
if not options.locale:
parser.error('you must provide a locale for the new catalog')
try:
locale = Locale.parse(options.locale)
except UnknownLocaleError as e:
parser.error(e)
if not options.input_file:
parser.error('you must specify the input file')
if not options.output_file and not options.output_dir:
parser.error('you must specify the output file or directory')
if not options.output_file:
options.output_file = os.path.join(options.output_dir,
options.locale, 'LC_MESSAGES',
options.domain + '.po')
if not os.path.exists(os.path.dirname(options.output_file)):
os.makedirs(os.path.dirname(options.output_file))
if options.width and options.no_wrap:
parser.error("'--no-wrap' and '--width' are mutually exclusive.")
elif not options.width and not options.no_wrap:
options.width = 76
infile = open(options.input_file, 'r')
try:
# Although reading from the catalog template, read_po must be fed
# the locale in order to correctly calculate plurals
catalog = read_po(infile, locale=options.locale)
finally:
infile.close()
catalog.locale = locale
catalog.revision_date = datetime.now(LOCALTZ)
self.log.info('creating catalog %r based on %r', options.output_file,
options.input_file)
outfile = open(options.output_file, 'wb')
try:
write_po(outfile, catalog, width=options.width)
finally:
outfile.close()
def update(self, argv):
"""Subcommand for updating existing message catalogs from a template.
:param argv: the command arguments
:since: version 0.9
"""
parser = OptionParser(usage=self.usage % ('update', ''),
description=self.commands['update'])
parser.add_option('--domain', '-D', dest='domain',
help="domain of PO file (default '%default')")
parser.add_option('--input-file', '-i', dest='input_file',
metavar='FILE', help='name of the input file')
parser.add_option('--output-dir', '-d', dest='output_dir',
metavar='DIR', help='path to output directory')
parser.add_option('--output-file', '-o', dest='output_file',
metavar='FILE',
help="name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/"
"<domain>.po')")
parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
help='locale of the translations catalog')
parser.add_option('-w', '--width', dest='width', type='int',
help="set output line width (default 76)")
parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true',
help='do not break long message lines, longer than '
'the output line width, into several lines')
parser.add_option('--ignore-obsolete', dest='ignore_obsolete',
action='store_true',
help='do not include obsolete messages in the output '
'(default %default)')
parser.add_option('--no-fuzzy-matching', '-N', dest='no_fuzzy_matching',
action='store_true',
help='do not use fuzzy matching (default %default)')
parser.add_option('--previous', dest='previous', action='store_true',
help='keep previous msgids of translated messages '
'(default %default)')
parser.set_defaults(domain='messages', ignore_obsolete=False,
no_fuzzy_matching=False, previous=False)
options, args = parser.parse_args(argv)
if not options.input_file:
parser.error('you must specify the input file')
if not options.output_file and not options.output_dir:
parser.error('you must specify the output file or directory')
if options.output_file and not options.locale:
parser.error('you must specify the locale')
if options.no_fuzzy_matching and options.previous:
options.previous = False
po_files = []
if not options.output_file:
if options.locale:
po_files.append((options.locale,
os.path.join(options.output_dir,
options.locale, 'LC_MESSAGES',
options.domain + '.po')))
else:
for locale in os.listdir(options.output_dir):
po_file = os.path.join(options.output_dir, locale,
'LC_MESSAGES',
options.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
else:
po_files.append((options.locale, options.output_file))
domain = options.domain
if not domain:
domain = os.path.splitext(os.path.basename(options.input_file))[0]
infile = open(options.input_file, 'U')
try:
template = read_po(infile)
finally:
infile.close()
if not po_files:
parser.error('no message catalogs found')
if options.width and options.no_wrap:
parser.error("'--no-wrap' and '--width' are mutually exclusive.")
elif not options.width and not options.no_wrap:
options.width = 76
for locale, filename in po_files:
self.log.info('updating catalog %r based on %r', filename,
options.input_file)
infile = open(filename, 'U')
try:
catalog = read_po(infile, locale=locale, domain=domain)
finally:
infile.close()
catalog.update(template, options.no_fuzzy_matching)
tmpname = os.path.join(os.path.dirname(filename),
tempfile.gettempprefix() +
os.path.basename(filename))
tmpfile = open(tmpname, 'w')
try:
try:
write_po(tmpfile, catalog,
ignore_obsolete=options.ignore_obsolete,
include_previous=options.previous,
width=options.width)
finally:
tmpfile.close()
except:
os.remove(tmpname)
raise
try:
os.rename(tmpname, filename)
except OSError:
# We're probably on Windows, which doesn't support atomic
# renames, at least not through Python
# If the error is in fact due to a permissions problem, that
# same error is going to be raised from one of the following
# operations
os.remove(filename)
shutil.copy(tmpname, filename)
os.remove(tmpname)
def main():
return CommandLineInterface().run(sys.argv)
def parse_mapping(fileobj, filename=None):
"""Parse an extraction method mapping from a file-like object.
>>> buf = BytesIO(b'''
... [extractors]
... custom = mypackage.module:myfunc
...
... # Python source files
... [python: **.py]
...
... # Genshi templates
... [genshi: **/templates/**.html]
... include_attrs =
... [genshi: **/templates/**.txt]
... template_class = genshi.template:TextTemplate
... encoding = latin-1
...
... # Some custom extractor
... [custom: **/custom/*.*]
... ''')
>>> method_map, options_map = parse_mapping(buf)
>>> len(method_map)
4
>>> method_map[0]
('**.py', 'python')
>>> options_map['**.py']
{}
>>> method_map[1]
('**/templates/**.html', 'genshi')
>>> options_map['**/templates/**.html']['include_attrs']
''
>>> method_map[2]
('**/templates/**.txt', 'genshi')
>>> options_map['**/templates/**.txt']['template_class']
'genshi.template:TextTemplate'
>>> options_map['**/templates/**.txt']['encoding']
'latin-1'
>>> method_map[3]
('**/custom/*.*', 'mypackage.module:myfunc')
>>> options_map['**/custom/*.*']
{}
:param fileobj: a readable file-like object containing the configuration
text to parse
:return: a `(method_map, options_map)` tuple
:rtype: `tuple`
:see: `extract_from_directory`
"""
extractors = {}
method_map = []
options_map = {}
parser = RawConfigParser()
parser._sections = odict(parser._sections) # We need ordered sections
parser.readfp(fileobj, filename)
for section in parser.sections():
if section == 'extractors':
extractors = dict(parser.items(section))
else:
method, pattern = [part.strip() for part in section.split(':', 1)]
method_map.append((pattern, method))
options_map[pattern] = dict(parser.items(section))
if extractors:
for idx, (pattern, method) in enumerate(method_map):
if method in extractors:
method = extractors[method]
method_map[idx] = (pattern, method)
return (method_map, options_map)
def parse_keywords(strings=[]):
"""Parse keywords specifications from the given list of strings.
>>> kw = sorted(parse_keywords(['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2']).items())
>>> for keyword, indices in kw:
...     print((keyword, indices))
('_', None)
('dgettext', (2,))
('dngettext', (2, 3))
('pgettext', ((1, 'c'), 2))
"""
keywords = {}
for string in strings:
if ':' in string:
funcname, indices = string.split(':')
else:
funcname, indices = string, None
if funcname not in keywords:
if indices:
inds = []
for x in indices.split(','):
if x[-1] == 'c':
inds.append((int(x[:-1]), 'c'))
else:
inds.append(int(x))
indices = tuple(inds)
keywords[funcname] = indices
return keywords
if __name__ == '__main__':
main()
|
|
"""
Julia code printer
The `JuliaCodePrinter` converts SymPy expressions into Julia expressions.
A complete code generator, which uses `julia_code` extensively, can be found
in `sympy.utilities.codegen`. The `codegen` module can be used to generate
complete source code files.
"""
from __future__ import print_function, division
from sympy.core import Mul, Pow, S, Rational
from sympy.core.compatibility import string_types, range
from sympy.core.mul import _keep_coeff
from sympy.printing.codeprinter import CodePrinter, Assignment
from sympy.printing.precedence import precedence
from re import search
# List of known functions. First, those that have the same name in
# SymPy and Julia. This is almost certainly incomplete!
known_fcns_src1 = ["sin", "cos", "tan", "cot", "sec", "csc",
"asin", "acos", "atan", "acot", "asec", "acsc",
"sinh", "cosh", "tanh", "coth", "sech", "csch",
"asinh", "acosh", "atanh", "acoth", "asech",
"sinc", "atan2", "sign", "floor", "log", "exp",
"cbrt", "sqrt", "erf", "erfc", "erfi",
"factorial", "gamma", "digamma", "trigamma",
"polygamma", "beta",
"airyai", "airyaiprime", "airybi", "airybiprime",
"besselj", "bessely", "besseli", "besselk",
"erfinv", "erfcinv"]
# These functions have different names ("Sympy": "Julia"), more
# generally a mapping to (argument_conditions, julia_function).
known_fcns_src2 = {
"Abs": "abs",
"ceiling": "ceil",
"conjugate": "conj",
"hankel1": "hankelh1",
"hankel2": "hankelh2",
"im": "imag",
"re": "real"
}
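# Illustrative sketch (not part of SymPy): a tiny helper showing how the printer
# defined below is typically driven; the julia_code() convenience function that
# this module also provides wraps the same machinery. The symbol and expression
# are arbitrary examples and nothing else calls this helper.
def _example_julia_print():
    from sympy import symbols, sin
    x = symbols('x')
    # sin(x) is not a number, so _print_Pow emits the elementwise operator,
    # giving the string "sin(x).^2"
    return JuliaCodePrinter().doprint(sin(x)**2)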
class JuliaCodePrinter(CodePrinter):
"""
A printer to convert expressions to strings of Julia code.
"""
printmethod = "_julia"
language = "Julia"
_operators = {
'and': '&&',
'or': '||',
'not': '!',
}
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 16,
'user_functions': {},
'human': True,
'contract': True,
'inline': True,
}
# Note: contract is for expressing tensors as loops (if True), or just
# assignment (if False). FIXME: this should be looked at more carefully
# for Julia.
def __init__(self, settings={}):
super(JuliaCodePrinter, self).__init__(settings)
self.known_functions = dict(zip(known_fcns_src1, known_fcns_src1))
self.known_functions.update(dict(known_fcns_src2))
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s" % codestring
def _get_comment(self, text):
return "# {0}".format(text)
def _declare_number_const(self, name, value):
return "const {0} = {1}".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
# Julia uses Fortran order (column-major)
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# Julia arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("for %s = %s:%s" % (var, start, stop))
close_lines.append("end")
return open_lines, close_lines
def _print_Mul(self, expr):
# print complex numbers nicely in Julia
if (expr.is_number and expr.is_imaginary and
expr.as_coeff_Mul()[0].is_integer):
return "%sim" % self._print(-S.ImaginaryUnit*expr)
# cribbed from str.py
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if (item.is_commutative and item.is_Pow and item.exp.is_Rational
and item.exp.is_negative):
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
elif item.is_Rational and item is not S.Infinity:
if item.p != 1:
a.append(Rational(item.p))
if item.q != 1:
b.append(Rational(item.q))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
# from here it differs from str.py to deal with "*" and ".*"
def multjoin(a, a_str):
# here we probably are assuming the constants will come first
r = a_str[0]
for i in range(1, len(a)):
mulsym = '*' if a[i-1].is_number else '.*'
r = r + mulsym + a_str[i]
return r
if len(b) == 0:
return sign + multjoin(a, a_str)
elif len(b) == 1:
divsym = '/' if b[0].is_number else './'
return sign + multjoin(a, a_str) + divsym + b_str[0]
else:
divsym = '/' if all([bi.is_number for bi in b]) else './'
return (sign + multjoin(a, a_str) +
divsym + "(%s)" % multjoin(b, b_str))
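# Illustrative expected outputs from the rules above (symbolic x, y):
#     3*I    ->  "3im"       (integer-coefficient imaginary numbers)
#     3*x*y  ->  "3*x.*y"    ("*" after a plain number, ".*" between symbols)
#     x/y    ->  "x./y"      (element-wise division for a symbolic divisor)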
def _print_Pow(self, expr):
powsymbol = '^' if all([x.is_number for x in expr.args]) else '.^'
PREC = precedence(expr)
if expr.exp == S.Half:
return "sqrt(%s)" % self._print(expr.base)
if expr.is_commutative:
if expr.exp == -S.Half:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "sqrt(%s)" % self._print(expr.base)
if expr.exp == -S.One:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "%s" % self.parenthesize(expr.base, PREC)
return '%s%s%s' % (self.parenthesize(expr.base, PREC), powsymbol,
self.parenthesize(expr.exp, PREC))
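# Illustrative expected outputs from the rules above (symbolic x):
#     x**2     ->  "x.^2"
#     sqrt(x)  ->  "sqrt(x)"
#     1/x      ->  "1./x"    (element-wise, since x is not a plain number)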
def _print_MatPow(self, expr):
PREC = precedence(expr)
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_Pi(self, expr):
if self._settings["inline"]:
return "pi"
else:
return super(JuliaCodePrinter, self)._print_NumberSymbol(expr)
def _print_ImaginaryUnit(self, expr):
return "im"
def _print_Exp1(self, expr):
if self._settings["inline"]:
return "e"
else:
return super(JuliaCodePrinter, self)._print_NumberSymbol(expr)
def _print_EulerGamma(self, expr):
if self._settings["inline"]:
return "eulergamma"
else:
return super(JuliaCodePrinter, self)._print_NumberSymbol(expr)
def _print_Catalan(self, expr):
if self._settings["inline"]:
return "catalan"
else:
return super(JuliaCodePrinter, self)._print_NumberSymbol(expr)
def _print_GoldenRatio(self, expr):
if self._settings["inline"]:
return "golden"
else:
return super(JuliaCodePrinter, self)._print_NumberSymbol(expr)
def _print_NumberSymbol(self, expr):
if self._settings["inline"]:
return self._print(expr.evalf(self._settings["precision"]))
else:
# assign to a variable, perhaps more readable for longer programs
return super(JuliaCodePrinter, self)._print_NumberSymbol(expr)
def _print_Assignment(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.tensor.indexed import IndexedBase
# Copied from codeprinter, but remove special MatrixSymbol treatment
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if not self._settings["inline"] and isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
if self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Infinity(self, expr):
return 'Inf'
def _print_NegativeInfinity(self, expr):
return '-Inf'
def _print_NaN(self, expr):
return 'NaN'
def _print_list(self, expr):
return 'Any[' + ', '.join(self._print(a) for a in expr) + ']'
def _print_tuple(self, expr):
if len(expr) == 1:
return "(%s,)" % self._print(expr[0])
else:
return "(%s)" % self.stringify(expr, ", ")
_print_Tuple = _print_tuple
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_bool(self, expr):
return str(expr).lower()
# Could generate quadrature code for definite Integrals?
#_print_Integral = _print_not_supported
def _print_MatrixBase(self, A):
# Handle zero dimensions:
if A.rows == 0 or A.cols == 0:
return 'zeros(%s, %s)' % (A.rows, A.cols)
elif (A.rows, A.cols) == (1, 1):
return "[%s]" % A[0, 0]
elif A.rows == 1:
return "[%s]" % A.table(self, rowstart='', rowend='', colsep=' ')
elif A.cols == 1:
# note .table would unnecessarily equispace the rows
return "[%s]" % ", ".join([self._print(a) for a in A])
return "[%s]" % A.table(self, rowstart='', rowend='',
rowsep=';\n', colsep=' ')
def _print_SparseMatrix(self, A):
from sympy.matrices import Matrix
L = A.col_list();
# make row vectors of the indices and entries
I = Matrix([k[0] + 1 for k in L])
J = Matrix([k[1] + 1 for k in L])
AIJ = Matrix([k[2] for k in L])
return "sparse(%s, %s, %s, %s, %s)" % (self._print(I), self._print(J),
self._print(AIJ), A.rows, A.cols)
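# Illustrative sketch: SparseMatrix([[1, 0], [0, 2]]) would be expected to
# print as "sparse([1, 2], [1, 2], [1, 2], 2, 2)", i.e. 1-based row indices,
# column indices, values, then the matrix shape.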
# FIXME: Str/CodePrinter could define each of these to call the _print
# method from higher up the class hierarchy (see _print_NumberSymbol).
# Then subclasses like us would not need to repeat all this.
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
_print_MatrixBase
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_SparseMatrix
def _print_MatrixElement(self, expr):
return self._print(expr.parent) + '[%s,%s]'%(expr.i+1, expr.j+1)
def _print_MatrixSlice(self, expr):
def strslice(x, lim):
l = x[0] + 1
h = x[1]
step = x[2]
lstr = self._print(l)
hstr = 'end' if h == lim else self._print(h)
if step == 1:
if l == 1 and h == lim:
return ':'
if l == h:
return lstr
else:
return lstr + ':' + hstr
else:
return ':'.join((lstr, self._print(step), hstr))
return (self._print(expr.parent) + '[' +
strslice(expr.rowslice, expr.parent.shape[0]) + ',' +
strslice(expr.colslice, expr.parent.shape[1]) + ']')
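# Illustrative sketch: for a 3x3 MatrixSymbol A, the slice A[0:2, 0:3] would be
# expected to print as "A[1:2,:]" (1-based bounds; a full-range slice collapses
# to ":").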
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s[%s]" % (self._print(expr.base.label), ",".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Identity(self, expr):
return "eye(%s)" % self._print(expr.shape[0])
# Note: as of 2015, Julia doesn't have spherical Bessel functions
def _print_jn(self, expr):
from sympy.functions import sqrt, besselj
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*besselj(expr.order + S.Half, x)
return self._print(expr2)
def _print_yn(self, expr):
from sympy.functions import sqrt, bessely
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*bessely(expr.order + S.Half, x)
return self._print(expr2)
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if self._settings["inline"]:
# Express each (cond, expr) pair as a nested ternary:
#   (condition) ? (expr) : (<others>)
# Expressions that result in multiple statements won't work here.
ecpairs = ["({0}) ? ({1}) :".format
(self._print(c), self._print(e))
for e, c in expr.args[:-1]]
elast = " (%s)" % self._print(expr.args[-1].expr)
pw = "\n".join(ecpairs) + elast
# Note: we currently need these outer brackets for 2*pw. Would be
# nicer to teach parenthesize() to do this for us when needed!
return "(" + pw + ")"
else:
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s)" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("elseif (%s)" % self._print(c))
code0 = self._print(e)
lines.append(code0)
if i == len(expr.args) - 1:
lines.append("end")
return "\n".join(lines)
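# Illustrative sketch of the non-inline form: with inline=False, printing
# Piecewise((x + 1, x > 0), (x, True)) would yield the lines
#     if (x > 0)
#     x + 1
#     else
#     x
#     end
# with indentation added later by indent_code when the full output is assembled.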
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
# code mostly copied from ccode
if isinstance(code, string_types):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_regex = ('^function ', '^if ', '^elseif ', '^else$', '^for ')
dec_regex = ('^end$', '^elseif ', '^else$')
# pre-strip left-space from the code
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any([search(re, line) for re in inc_regex]))
for line in code ]
decrease = [ int(any([search(re, line) for re in dec_regex]))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line == '' or line == '\n':
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
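# Illustrative sketch: given the lines ["for i = 1:5\n", "x += i\n", "end\n"],
# indent_code would return the same lines with "x += i" indented by one level,
# since "for " opens a block and "end" closes it.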
def julia_code(expr, assign_to=None, **settings):
r"""Converts `expr` to a string of Julia code.
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This can be helpful for
expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=16].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
inline: bool, optional
If True, we try to create single-statement code instead of multiple
statements. [default=True].
Examples
========
>>> from sympy import julia_code, symbols, sin, pi
>>> x = symbols('x')
>>> julia_code(sin(x).series(x).removeO())
'x.^5/120 - x.^3/6 + x'
>>> from sympy import Rational, ceiling, Abs
>>> x, y, tau = symbols("x, y, tau")
>>> julia_code((2*tau)**Rational(7, 2))
'8*sqrt(2)*tau.^(7/2)'
Note that element-wise (Hadamard) operations are used by default between
symbols. This is because it is possible in Julia to write "vectorized"
code. It is harmless if the values are scalars.
>>> julia_code(sin(pi*x*y), assign_to="s")
's = sin(pi*x.*y)'
If you need a matrix product "*" or matrix power "^", you can specify the
symbol as a ``MatrixSymbol``.
>>> from sympy import Symbol, MatrixSymbol
>>> n = Symbol('n', integer=True, positive=True)
>>> A = MatrixSymbol('A', n, n)
>>> julia_code(3*pi*A**3)
'(3*pi)*A^3'
This class uses several rules to decide which symbol to use for a product.
Pure numbers use "*", Symbols use ".*" and MatrixSymbols use "*".
A HadamardProduct can be used to specify componentwise multiplication ".*"
of two MatrixSymbols. There is currently no easy way to specify
scalar symbols, so sometimes the code might have some minor cosmetic
issues. For example, suppose x and y are scalars and A is a Matrix, then
while a human programmer might write "(x^2*y)*A^3", we generate:
>>> julia_code(x**2*y*A**3)
'(x.^2.*y)*A^3'
Matrices are supported using Julia inline notation. When using
``assign_to`` with matrices, the name can be specified either as a string
or as a ``MatrixSymbol``. The dimensions must align in the latter case.
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([[x**2, sin(x), ceiling(x)]])
>>> julia_code(mat, assign_to='A')
'A = [x.^2 sin(x) ceil(x)]'
``Piecewise`` expressions are implemented with nested ternary operators by
default. Alternatively, you can pass "inline=False" to use if-else conditionals.
Note that if the ``Piecewise`` lacks a default term, represented by
``(expr, True)``, then an error will be thrown. This is to prevent
generating an expression that may not evaluate to anything.
>>> from sympy import Piecewise
>>> pw = Piecewise((x + 1, x > 0), (x, True))
>>> julia_code(pw, assign_to=tau)
'tau = ((x > 0) ? (x + 1) : (x))'
Note that any expression that can be generated normally can also exist
inside a Matrix:
>>> mat = Matrix([[x**2, pw, sin(x)]])
>>> julia_code(mat, assign_to='A')
'A = [x.^2 ((x > 0) ? (x + 1) : (x)) sin(x)]'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e., [(argument_test,
cfunction_string)]. This can be used to call a custom Julia function.
>>> from sympy import Function
>>> f = Function('f')
>>> g = Function('g')
>>> custom_functions = {
... "f": "existing_julia_fcn",
... "g": [(lambda x: x.is_Matrix, "my_mat_fcn"),
... (lambda x: not x.is_Matrix, "my_fcn")]
... }
>>> mat = Matrix([[1, x]])
>>> julia_code(f(x) + g(x) + g(mat), user_functions=custom_functions)
'existing_julia_fcn(x) + my_fcn(x) + my_mat_fcn([1 x])'
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx, ccode
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e = Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> julia_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])./(t[i + 1] - t[i])'
"""
return JuliaCodePrinter(settings).doprint(expr, assign_to)
def print_julia_code(expr, **settings):
"""Prints the Julia representation of the given expression.
See `julia_code` for the meaning of the optional arguments.
"""
print(julia_code(expr, **settings))
|
|
# Copyright 2016 Alethea Katherine Flowers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import sys
from pathlib import Path
from unittest import mock
import pytest
import nox
import nox.__main__
import nox._options
import nox.registry
import nox.sessions
try:
import importlib.metadata as metadata
except ImportError:
import importlib_metadata as metadata
RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
VERSION = metadata.version("nox")
# This is needed because CI systems will mess up these tests due to the
# way nox handles the --session parameter's default value. This avoids that
# mess.
os.environ.pop("NOXSESSION", None)
def test_main_no_args(monkeypatch):
monkeypatch.setattr(sys, "argv", [sys.executable])
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
# Call the function.
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_once_with(0)
assert execute.called
# Verify that the config looks correct.
config = execute.call_args[1]["global_config"]
assert config.noxfile == "noxfile.py"
assert config.sessions is None
assert not config.no_venv
assert not config.reuse_existing_virtualenvs
assert not config.stop_on_first_error
assert config.posargs == []
def test_main_long_form_args():
sys.argv = [
sys.executable,
"--noxfile",
"noxfile.py",
"--envdir",
".other",
"--sessions",
"1",
"2",
"--default-venv-backend",
"venv",
"--force-venv-backend",
"none",
"--no-venv",
"--reuse-existing-virtualenvs",
"--stop-on-first-error",
]
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
# Call the main function.
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_once_with(0)
assert execute.called
# Verify that the config looks correct.
config = execute.call_args[1]["global_config"]
assert config.noxfile == "noxfile.py"
assert config.envdir.endswith(".other")
assert config.sessions == ["1", "2"]
assert config.default_venv_backend == "venv"
assert config.force_venv_backend == "none"
assert config.no_venv is True
assert config.reuse_existing_virtualenvs is True
assert config.stop_on_first_error is True
assert config.posargs == []
def test_main_no_venv(monkeypatch, capsys):
# Check that --no-venv overrides force_venv_backend
monkeypatch.setattr(
sys,
"argv",
[
"nox",
"--noxfile",
os.path.join(RESOURCES, "noxfile_pythons.py"),
"--no-venv",
"-s",
"snack(cheese='cheddar')",
],
)
with mock.patch("sys.exit") as sys_exit:
nox.__main__.main()
stdout, stderr = capsys.readouterr()
assert stdout == "Noms, cheddar so good!\n"
assert (
"Session snack is set to run with venv_backend='none', IGNORING its python"
in stderr
)
assert "Session snack(cheese='cheddar') was successful." in stderr
sys_exit.assert_called_once_with(0)
def test_main_no_venv_error():
# Check that --no-venv can not be set together with a non-none --force-venv-backend
sys.argv = [
sys.executable,
"--noxfile",
"noxfile.py",
"--force-venv-backend",
"conda",
"--no-venv",
]
with pytest.raises(ValueError, match="You can not use"):
nox.__main__.main()
def test_main_short_form_args(monkeypatch):
monkeypatch.setattr(
sys,
"argv",
[
sys.executable,
"-f",
"noxfile.py",
"-s",
"1",
"2",
"-db",
"venv",
"-fb",
"conda",
"-r",
],
)
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
# Call the main function.
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_once_with(0)
assert execute.called
# Verify that the config looks correct.
config = execute.call_args[1]["global_config"]
assert config.noxfile == "noxfile.py"
assert config.sessions == ["1", "2"]
assert config.default_venv_backend == "venv"
assert config.force_venv_backend == "conda"
assert config.reuse_existing_virtualenvs is True
def test_main_explicit_sessions(monkeypatch):
monkeypatch.setattr(sys, "argv", [sys.executable, "-e", "1", "2"])
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
# Call the main function.
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_once_with(0)
assert execute.called
# Verify that the explicit sessions are listed in the config.
config = execute.call_args[1]["global_config"]
assert config.sessions == ["1", "2"]
def test_main_explicit_sessions_with_spaces_in_names(monkeypatch):
monkeypatch.setattr(
sys, "argv", [sys.executable, "-e", "unit tests", "the unit tests"]
)
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
# Call the main function.
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_once_with(0)
assert execute.called
# Verify that the explicit sessions are listed in the config.
config = execute.call_args[1]["global_config"]
assert config.sessions == ["unit tests", "the unit tests"]
@pytest.mark.parametrize(
"env,sessions", [("foo", ["foo"]), ("foo,bar", ["foo", "bar"])]
)
def test_main_session_from_nox_env_var(monkeypatch, env, sessions):
monkeypatch.setenv("NOXSESSION", env)
monkeypatch.setattr(sys, "argv", [sys.executable])
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
# Call the main function.
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_once_with(0)
assert execute.called
# Verify that the sessions from the env var are listed in the config.
config = execute.call_args[1]["global_config"]
assert len(config.sessions) == len(sessions)
for session in sessions:
assert session in config.sessions
def test_main_positional_args(capsys, monkeypatch):
fake_exit = mock.Mock(side_effect=ValueError("asdf!"))
monkeypatch.setattr(sys, "argv", [sys.executable, "1", "2", "3"])
with mock.patch.object(sys, "exit", fake_exit), pytest.raises(
ValueError, match="asdf!"
):
nox.__main__.main()
_, stderr = capsys.readouterr()
assert "Unknown argument(s) '1 2 3'" in stderr
fake_exit.assert_called_once_with(2)
fake_exit.reset_mock()
monkeypatch.setattr(sys, "argv", [sys.executable, "1", "2", "3", "--"])
with mock.patch.object(sys, "exit", fake_exit), pytest.raises(
ValueError, match="asdf!"
):
nox.__main__.main()
_, stderr = capsys.readouterr()
assert "Unknown argument(s) '1 2 3'" in stderr
fake_exit.assert_called_once_with(2)
def test_main_positional_with_double_hyphen(monkeypatch):
monkeypatch.setattr(sys, "argv", [sys.executable, "--", "1", "2", "3"])
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
# Call the main function.
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_once_with(0)
assert execute.called
# Verify that the positional args are listed in the config.
config = execute.call_args[1]["global_config"]
assert config.posargs == ["1", "2", "3"]
def test_main_positional_flag_like_with_double_hyphen(monkeypatch):
monkeypatch.setattr(
sys, "argv", [sys.executable, "--", "1", "2", "3", "-f", "--baz"]
)
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
# Call the main function.
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_once_with(0)
assert execute.called
# Verify that the positional args are listed in the config.
config = execute.call_args[1]["global_config"]
assert config.posargs == ["1", "2", "3", "-f", "--baz"]
def test_main_version(capsys, monkeypatch):
monkeypatch.setattr(sys, "argv", [sys.executable, "--version"])
with contextlib.ExitStack() as stack:
execute = stack.enter_context(mock.patch("nox.workflow.execute"))
exit_mock = stack.enter_context(mock.patch("sys.exit"))
nox.__main__.main()
_, err = capsys.readouterr()
assert VERSION in err
exit_mock.assert_not_called()
execute.assert_not_called()
def test_main_help(capsys, monkeypatch):
monkeypatch.setattr(sys, "argv", [sys.executable, "--help"])
with contextlib.ExitStack() as stack:
execute = stack.enter_context(mock.patch("nox.workflow.execute"))
exit_mock = stack.enter_context(mock.patch("sys.exit"))
nox.__main__.main()
out, _ = capsys.readouterr()
assert "help" in out
exit_mock.assert_not_called()
execute.assert_not_called()
def test_main_failure(monkeypatch):
monkeypatch.setattr(sys, "argv", [sys.executable])
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 1
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_once_with(1)
def test_main_nested_config(capsys, monkeypatch):
monkeypatch.setattr(
sys,
"argv",
[
"nox",
"--noxfile",
os.path.join(RESOURCES, "noxfile_nested.py"),
"-s",
"snack(cheese='cheddar')",
],
)
with mock.patch("sys.exit") as sys_exit:
nox.__main__.main()
stdout, stderr = capsys.readouterr()
assert stdout == "Noms, cheddar so good!\n"
assert "Session snack(cheese='cheddar') was successful." in stderr
sys_exit.assert_called_once_with(0)
def test_main_session_with_names(capsys, monkeypatch):
monkeypatch.setattr(
sys,
"argv",
[
"nox",
"--noxfile",
os.path.join(RESOURCES, "noxfile_spaces.py"),
"-s",
"cheese list(cheese='cheddar')",
],
)
with mock.patch("sys.exit") as sys_exit:
nox.__main__.main()
stdout, stderr = capsys.readouterr()
assert stdout == "Noms, cheddar so good!\n"
assert "Session cheese list(cheese='cheddar') was successful." in stderr
sys_exit.assert_called_once_with(0)
@pytest.fixture
def run_nox(capsys, monkeypatch):
def _run_nox(*args):
monkeypatch.setattr(sys, "argv", ["nox", *args])
with mock.patch("sys.exit") as sys_exit:
nox.__main__.main()
stdout, stderr = capsys.readouterr()
returncode = sys_exit.call_args[0][0]
return returncode, stdout, stderr
return _run_nox
@pytest.mark.parametrize(
("normalized_name", "session"),
[
("test(arg='Jane')", "test(arg='Jane')"),
("test(arg='Jane')", 'test(arg="Jane")'),
("test(arg='Jane')", 'test(arg = "Jane")'),
("test(arg='Jane')", 'test ( arg = "Jane" )'),
('test(arg="Joe\'s")', 'test(arg="Joe\'s")'),
('test(arg="Joe\'s")', "test(arg='Joe\\'s')"),
("test(arg='\"hello world\"')", "test(arg='\"hello world\"')"),
("test(arg='\"hello world\"')", 'test(arg="\\"hello world\\"")'),
("test(arg=[42])", "test(arg=[42])"),
("test(arg=[42])", "test(arg=[42,])"),
("test(arg=[42])", "test(arg=[ 42 ])"),
("test(arg=[42])", "test(arg=[0x2a])"),
(
"test(arg=datetime.datetime(1980, 1, 1, 0, 0))",
"test(arg=datetime.datetime(1980, 1, 1, 0, 0))",
),
(
"test(arg=datetime.datetime(1980, 1, 1, 0, 0))",
"test(arg=datetime.datetime(1980,1,1,0,0))",
),
(
"test(arg=datetime.datetime(1980, 1, 1, 0, 0))",
"test(arg=datetime.datetime(1980, 1, 1, 0, 0x0))",
),
],
)
def test_main_with_normalized_session_names(run_nox, normalized_name, session):
noxfile = os.path.join(RESOURCES, "noxfile_normalization.py")
returncode, _, stderr = run_nox(f"--noxfile={noxfile}", f"--session={session}")
assert returncode == 0
assert normalized_name in stderr
@pytest.mark.parametrize(
"session",
[
"syntax error",
"test(arg=Jane)",
"test(arg='Jane ')",
"_test(arg='Jane')",
"test(arg=42)",
"test(arg=[42.0])",
"test(arg=[43])",
"test(arg=<user_nox_module.Foo object at 0x123456789abc>)",
"test(arg=datetime.datetime(1980, 1, 1))",
],
)
def test_main_with_bad_session_names(run_nox, session):
noxfile = os.path.join(RESOURCES, "noxfile_normalization.py")
returncode, _, stderr = run_nox(f"--noxfile={noxfile}", f"--session={session}")
assert returncode != 0
assert session in stderr
def test_main_noxfile_options(monkeypatch):
monkeypatch.setattr(
sys,
"argv",
[
"nox",
"-l",
"-s",
"test",
"--noxfile",
os.path.join(RESOURCES, "noxfile_options.py"),
],
)
with mock.patch("nox.tasks.honor_list_request") as honor_list_request:
honor_list_request.return_value = 0
with mock.patch("sys.exit"):
nox.__main__.main()
assert honor_list_request.called
# Verify that the config looks correct.
config = honor_list_request.call_args[1]["global_config"]
assert config.reuse_existing_virtualenvs is True
def test_main_noxfile_options_disabled_by_flag(monkeypatch):
monkeypatch.setattr(
sys,
"argv",
[
"nox",
"-l",
"-s",
"test",
"--no-reuse-existing-virtualenvs",
"--noxfile",
os.path.join(RESOURCES, "noxfile_options.py"),
],
)
with mock.patch("nox.tasks.honor_list_request") as honor_list_request:
honor_list_request.return_value = 0
with mock.patch("sys.exit"):
nox.__main__.main()
assert honor_list_request.called
# Verify that the config looks correct.
config = honor_list_request.call_args[1]["global_config"]
assert config.reuse_existing_virtualenvs is False
def test_main_noxfile_options_sessions(monkeypatch):
monkeypatch.setattr(
sys,
"argv",
["nox", "-l", "--noxfile", os.path.join(RESOURCES, "noxfile_options.py")],
)
with mock.patch("nox.tasks.honor_list_request") as honor_list_request:
honor_list_request.return_value = 0
with mock.patch("sys.exit"):
nox.__main__.main()
assert honor_list_request.called
# Verify that the config looks correct.
config = honor_list_request.call_args[1]["global_config"]
assert config.sessions == ["test"]
@pytest.fixture
def generate_noxfile_options_pythons(tmp_path):
"""Generate noxfile.py with test and launch_rocket sessions.
The sessions are defined for both the default and alternate Python versions.
The ``default_session`` and ``default_python`` parameters determine what
goes into ``nox.options.sessions`` and ``nox.options.pythons``, respectively.
"""
def generate_noxfile(default_session, default_python, alternate_python):
path = Path(RESOURCES) / "noxfile_options_pythons.py"
text = path.read_text()
text = text.format(
default_session=default_session,
default_python=default_python,
alternate_python=alternate_python,
)
path = tmp_path / "noxfile.py"
path.write_text(text)
return str(path)
return generate_noxfile
python_current_version = "{}.{}".format(sys.version_info.major, sys.version_info.minor)
python_next_version = "{}.{}".format(sys.version_info.major, sys.version_info.minor + 1)
def test_main_noxfile_options_with_pythons_override(
capsys, monkeypatch, generate_noxfile_options_pythons
):
noxfile = generate_noxfile_options_pythons(
default_session="test",
default_python=python_next_version,
alternate_python=python_current_version,
)
monkeypatch.setattr(
sys, "argv", ["nox", "--noxfile", noxfile, "--python", python_current_version]
)
with mock.patch("sys.exit") as sys_exit:
nox.__main__.main()
_, stderr = capsys.readouterr()
sys_exit.assert_called_once_with(0)
for python_version in [python_current_version, python_next_version]:
for session in ["test", "launch_rocket"]:
line = "Running session {}-{}".format(session, python_version)
if session == "test" and python_version == python_current_version:
assert line in stderr
else:
assert line not in stderr
def test_main_noxfile_options_with_sessions_override(
capsys, monkeypatch, generate_noxfile_options_pythons
):
noxfile = generate_noxfile_options_pythons(
default_session="test",
default_python=python_current_version,
alternate_python=python_next_version,
)
monkeypatch.setattr(
sys, "argv", ["nox", "--noxfile", noxfile, "--session", "launch_rocket"]
)
with mock.patch("sys.exit") as sys_exit:
nox.__main__.main()
_, stderr = capsys.readouterr()
sys_exit.assert_called_once_with(0)
for python_version in [python_current_version, python_next_version]:
for session in ["test", "launch_rocket"]:
line = "Running session {}-{}".format(session, python_version)
if session == "launch_rocket" and python_version == python_current_version:
assert line in stderr
else:
assert line not in stderr
@pytest.mark.parametrize(("isatty_value", "expected"), [(True, True), (False, False)])
def test_main_color_from_isatty(monkeypatch, isatty_value, expected):
monkeypatch.setattr(sys, "argv", [sys.executable])
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
with mock.patch("sys.stdout.isatty") as isatty:
isatty.return_value = isatty_value
# Call the main function.
with mock.patch.object(sys, "exit"):
nox.__main__.main()
config = execute.call_args[1]["global_config"]
assert config.color == expected
@pytest.mark.parametrize(
("color_opt", "expected"),
[
("--forcecolor", True),
("--nocolor", False),
("--force-color", True),
("--no-color", False),
],
)
def test_main_color_options(monkeypatch, color_opt, expected):
monkeypatch.setattr(sys, "argv", [sys.executable, color_opt])
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 0
# Call the main function.
with mock.patch.object(sys, "exit"):
nox.__main__.main()
config = execute.call_args[1]["global_config"]
assert config.color == expected
def test_main_color_conflict(capsys, monkeypatch):
monkeypatch.setattr(sys, "argv", [sys.executable, "--forcecolor", "--nocolor"])
with mock.patch("nox.workflow.execute") as execute:
execute.return_value = 1
# Call the main function.
with mock.patch.object(sys, "exit") as exit:
nox.__main__.main()
exit.assert_called_with(1)
_, err = capsys.readouterr()
assert "color" in err
def test_main_force_python(monkeypatch):
monkeypatch.setattr(sys, "argv", ["nox", "--force-python=3.10"])
with mock.patch("nox.workflow.execute", return_value=0) as execute:
with mock.patch.object(sys, "exit"):
nox.__main__.main()
config = execute.call_args[1]["global_config"]
assert config.pythons == config.extra_pythons == ["3.10"]
def test_main_reuse_existing_virtualenvs_no_install(monkeypatch):
monkeypatch.setattr(sys, "argv", ["nox", "-R"])
with mock.patch("nox.workflow.execute", return_value=0) as execute:
with mock.patch.object(sys, "exit"):
nox.__main__.main()
config = execute.call_args[1]["global_config"]
assert config.reuse_existing_virtualenvs and config.no_install
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 12:25:17 2017
@author: dongyu
"""
import os
import utm
import glob
import shutil
import time
import lib
import lib.SUNTANS
import hydro_wrapper
import oilspill_wrapper
from DownloadTool import downloadROMS
from FileTool import increaseT2
from WindTool import NCEP_wind, TAMU_NCEP_wind
from Probability_map import Pmap
from Blended_model import blend
import pdb
class upper_wrapper(object):
"""
The uppermost level of wrapper
"""
# Switches to different models
hydro_model='BLENDED' # hydrodynamic model options: 'SUNTANS', 'ROMS', 'BLENDED'
hydro_run=True
# ROMS data source
ROMS_datasource='online'
# General options
ishindcast=True
runGNOME=False
runTracPy=False
## GNOME settings
gnome_subset=False
gnome_bbox=None
probability_map=False
google_earth=False
mpl = 8
number = 10 # number of ensembles
interval = 10800 # seconds
OBC_opt = 'file' # Type 3 boundary condition option: 'constant',
#'file','OTIS', 'ROMS', 'ROMSOTIS','ROMSFILE', 'ROMSOTISFILE'
IC_opt = 'SUNTANS'
def __init__(self,**kwargs):
self.__dict__.update(kwargs)
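# Illustrative sketch: any of the class-level switches above can be overridden
# per instance via keyword arguments, e.g.
#     upper_wrapper(hydro_model='ROMS', runGNOME=True, number=5)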
def __call__(self,starttime, endtime, starttime2, period, init_latlon=[28.353786, -95.315109]):
## hydrodynamic time
self.starttime = starttime
self.endtime = endtime
## oil spill time
self.starttime2 = starttime2
self.period = period
self.init_latlon = init_latlon
if self.hydro_model == 'SUNTANS':
self.run_suntans()
elif self.hydro_model == 'ROMS':
self.run_roms()
elif self.hydro_model == 'BLENDED':
self.run_blended()
else:
raise Exception, 'Need to set the hydro_model parameter !!'
def run_suntans(self):
"""
use SUNTANS velocity to run GNOME
"""
(utm_x,utm_y)=utm.from_latlon(self.init_latlon[0], self.init_latlon[1])[0:2]
for i in range(self.number):
start = time.time()
print "Running simulation #%s !!\n"%str(i)
if self.hydro_run:
#### run SUNTANS ####
hydro_wrapper.runSUNTANS(self.starttime, self.endtime, self.OBC_opt, self.IC_opt, ROMS_datasource=self.ROMS_datasource)
if self.runGNOME:
## Collect SUNTANS file
basedir = os.getcwd()
os.chdir(basedir+'/SUNTANS/rundata')
ncfiles = []
for ff in glob.glob("GalvCoarse_0*"):
ncfiles.append(ff)
os.chdir(basedir)
SUNTANS_file = []
for f in ncfiles:
SUNTANS_file.append('%s/%s'%(basedir+'/SUNTANS/rundata', f))
## Prepare for GNOME run
GNOME_dir = "GNOME/%s"%str(i)
if os.path.exists(GNOME_dir):
shutil.rmtree(GNOME_dir)
os.makedirs(GNOME_dir)
SUNTANS_out = '%s/txsuntans.nc'%GNOME_dir
oilspill_wrapper.init_model(i, opt='SUNTANS')
oilspill_wrapper.Tx_SUNTANS(SUNTANS_file, SUNTANS_out)
print "Forecast simulation, downloading TAMU-NCEP wind !!!\n"
subset_wind = True
TNW = TAMU_NCEP_wind(subset_wind)
TNW.writeGNOME('%s/wind.nc'%GNOME_dir)
print 'running GNOME !!!\n'
oilspill_wrapper.run_mul_GNOME(i, utm_x, utm_y, self.starttime2, self.period, 900, opt='SUNTANS')
oilspill_wrapper.GNOME_GM_visualization(i, opt='SUNTANS')
oilspill_wrapper.GNOME_GE_animation(i, self.starttime2, opt='SUNTANS')
#### pause a while for new data to be available ####
end = time.time()
timer(start, end, i, self.number, self.interval)
if self.runGNOME and self.probability_map:
oilspill_wrapper.ensemble_combination(self.number, opt='SUNTANS')
print 'creating probability map!!!\n'
bbox=[-95.22,-94.44,28.80,29.85] # the map range
Pmap('GNOME_combined.nc', 400, 400, self.starttime2, bbox, self.mpl, self.google_earth)
def run_roms(self):
"""
use ROMS velocity to run GNOME
A good testing date range for which the particles will hit the SUNTANS domain is 2014-08-22 to 2014-08-29
A good testing initial location is 28.353786, -95.315109
"""
(utm_x,utm_y)=utm.from_latlon(self.init_latlon[0], self.init_latlon[1])[0:2]
for i in range(self.number):
start = time.time()
print "Running simulation #%s !!\n"%str(i)
if self.hydro_run:
#### download ROMS ####
downloadROMS(self.starttime, self.endtime, self.ROMS_datasource, ROMSsrc='forecast')
if self.runGNOME:
## Prepare for GNOME run
GNOME_dir = "GNOME/%s"%str(i)
if os.path.exists(GNOME_dir):
shutil.rmtree(GNOME_dir)
os.makedirs(GNOME_dir)
ROMS_file='DATA/txla_subset_HIS.nc'
ROMS_out = '%s/hiroms_ss_rho.nc'%GNOME_dir
oilspill_wrapper.init_model(i, opt='ROMS')
oilspill_wrapper.HIROMS(ROMS_file, ROMS_out)
#### wind ####
print "Forecast simulation, downloading TAMU-NCEP wind !!!\n"
subset_wind = False
TNW = TAMU_NCEP_wind(subset_wind)
TNW.writeGNOME('%s/wind.nc'%GNOME_dir)
print 'running GNOME !!!\n'
oilspill_wrapper.run_mul_GNOME(i, utm_x, utm_y, self.starttime2, self.period, 900, opt='ROMS')
oilspill_wrapper.GNOME_GM_visualization(i, opt='ROMS')
oilspill_wrapper.GNOME_GE_animation(i, self.starttime2, opt='ROMS')
#### run TracPy ####
if self.runTracPy:
## create TracPy directory
TRACPY_dir = 'TRACPY/%s'%str(i)
if os.path.exists(TRACPY_dir):
shutil.rmtree(TRACPY_dir)
os.makedirs(TRACPY_dir)
## move files
oilspill_wrapper.init_tracpy(i)
## run TracPy
print 'running TracPy !!!\n'
oilspill_wrapper.TRACPY(i, utm_x, utm_y, self.starttime2, self.period, opt='ROMS')
oilspill_wrapper.TRACPY_GM_visualization(i, opt='ROMS')
oilspill_wrapper.TRACPY_GE_animation(i, self.starttime2, opt='ROMS')
##### pause a while for new data to be available ####
## timer
end = time.time()
timer(start, end, i, self.number, self.interval)
#### probability map ####
if self.runGNOME and self.probability_map:
oilspill_wrapper.ensemble_combination(self.number, opt='ROMS')
print 'creating probability map!!!\n'
bbox=[-95.97,-94.025,27.24,29.89] # the map range
Pmap('GNOME_combined.nc', 400, 400, self.starttime2, bbox, self.mpl, self.google_earth)
def run_blended(self):
"""
use blended model velocity to run GNOME
"""
(utm_x,utm_y)=utm.from_latlon(self.init_latlon[0], self.init_latlon[1])[0:2]
for i in range(self.number):
start = time.time()
print "Running simulation #%s !!\n"%str(i)
if self.hydro_run:
## Step One: run SUNTANS
hydro_wrapper.runSUNTANS(self.starttime, self.endtime, 'ROMSFILE', 'ROMS', ROMS_datasource=self.ROMS_datasource)
## Step Two: Blend SUNTANS and ROMS
BL = blend(self.starttime, self.endtime)
BL.model_velocity()
## Step Three: run GNOME
if self.runGNOME:
## Prepare for GNOME run
GNOME_dir = "GNOME/%s"%str(i)
if os.path.exists(GNOME_dir):
shutil.rmtree(GNOME_dir)
os.makedirs(GNOME_dir)
blended_file = 'DATA/blended_uv.nc'
blended_out = '%s/hiroms_ss_rho.nc'%GNOME_dir
oilspill_wrapper.init_model(i, opt='blended')
oilspill_wrapper.HIROMS(blended_file, blended_out, subset=self.gnome_subset, bbox=self.gnome_bbox)
## GNOME wind
print "Forecast simulation, downloading TAMU-NCEP wind !!!\n"
subset_wind = False
TNW = TAMU_NCEP_wind(subset_wind)
TNW.writeGNOME('%s/wind.nc'%GNOME_dir)
print 'running GNOME !!!\n'
oilspill_wrapper.run_mul_GNOME(i, utm_x, utm_y, self.starttime2, self.period, 900, opt='blended')
oilspill_wrapper.GNOME_GM_visualization(i, opt='blended')
oilspill_wrapper.GNOME_GE_animation(i, self.starttime2, opt='blended')
#### Step Four: run TracPy ####
if self.runTracPy:
## create TracPy directory
TRACPY_dir = 'TRACPY/%s'%str(i)
if os.path.exists(TRACPY_dir):
shutil.rmtree(TRACPY_dir)
os.makedirs(TRACPY_dir)
## move files
oilspill_wrapper.init_tracpy(i)
## run TracPy
print 'running TracPy !!!\n'
oilspill_wrapper.TRACPY(i, utm_x, utm_y, self.starttime2, self.period, opt='blended')
oilspill_wrapper.TRACPY_GM_visualization(i, opt='blended')
oilspill_wrapper.TRACPY_GE_animation(i, self.starttime2, opt='blended')
#### pause a while for new data to be available ####
## timer
end = time.time()
timer(start, end, i, self.number, self.interval)
if self.runGNOME and self.probability_map:
oilspill_wrapper.ensemble_combination(self.number, opt='blended')
print 'creating probability map!!!\n'
bbox=[-95.97,-94.025,27.24,29.89] # the map range
Pmap('GNOME_combined.nc', 400, 400, self.starttime2, bbox, self.mpl, self.google_earth)
def timer(start, end, i, number, interval):
"""
pause time
"""
simulation_time = int(end-start)
#### Start pausing ####
if i!= number-1:
sleep_time = interval - simulation_time
for j in xrange(int(sleep_time/60.)*60, 0, -60):
mm = j/60
print 'Starting new simulation in %d minutes ...'%mm
time.sleep(60)
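# Illustrative sketch: with the default interval of 10800 s, a simulation that
# took 600 s would pause for roughly 10200 s, counting down in one-minute steps,
# before the next ensemble member starts (no pause after the last one).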
#### For testing only
if __name__ == "__main__":
starttime='2016-03-15-00'
endtime='2016-03-19-00'
endtime='2016-03-16-00'
#starttime='2017-03-15-00'
#endtime='2017-03-19-00'
UW = upper_wrapper()
# Note: __call__ expects (starttime, endtime, starttime2, period, ...); the oil
# spill start time is assumed here to coincide with the hydrodynamic start time.
UW(starttime, endtime, starttime, 20, init_latlon=[28.353786, -95.315109]) #ROMS domain
#UW(starttime, endtime, starttime, 90, init_latlon=[29.463089, -94.843460]) #SUNTANS domain
|
|
from flask import request, url_for, redirect, flash
from jinja2 import contextfunction
from flask.ext.admin.babel import gettext
from flask.ext.admin.base import BaseView, expose
from flask.ext.admin.tools import rec_getattr, ObsoleteAttr
from flask.ext.admin.model import filters, typefmt
from flask.ext.admin.actions import ActionsMixin
class BaseModelView(BaseView, ActionsMixin):
"""
Base model view.
The view does not make any assumptions on how models are stored or managed, but expects the following:
1. Model is an object
2. Model contains properties
3. Each model contains an attribute which uniquely identifies it (i.e. a primary key for a database model)
4. You can get a list of sorted models with pagination applied from a data source
5. You can get one model by its identifier from the data source
Essentially, if you want to support a new data store, all you have to do is:
1. Derive from the `BaseModelView` class
2. Implement various data-related methods (`get_list`, `get_one`, `create_model`, etc)
3. Implement automatic form generation from the model representation (`scaffold_form`)
(An illustrative, hypothetical in-memory subclass sketch appears after this class definition.)
"""
# Permissions
can_create = True
"""Is model creation allowed"""
can_edit = True
"""Is model editing allowed"""
can_delete = True
"""Is model deletion allowed"""
# Templates
list_template = 'admin/model/list.html'
"""Default list view template"""
edit_template = 'admin/model/edit.html'
"""Default edit template"""
create_template = 'admin/model/create.html'
"""Default create template"""
# Customizations
column_list = ObsoleteAttr('column_list', 'list_columns', None)
"""
Collection of the model field names for the list view.
If set to `None`, will get them from the model.
For example::
class MyModelView(BaseModelView):
column_list = ('name', 'last_name', 'email')
"""
column_exclude_list = ObsoleteAttr('column_exclude_list',
'excluded_list_columns', None)
"""
Collection of excluded list column names.
For example::
class MyModelView(BaseModelView):
column_exclude_list = ('last_name', 'email')
"""
column_formatters = ObsoleteAttr('column_formatters', 'list_formatters', dict())
"""
Dictionary of list view column formatters.
For example, if you want to show price multiplied by
two, you can do something like this::
class MyModelView(BaseModelView):
column_formatters = dict(price=lambda c, m, p: m.price*2)
Callback function has following prototype::
def formatter(context, model, name):
# context is instance of jinja2.runtime.Context
# model is model instance
# name is property name
pass
"""
column_type_formatters = ObsoleteAttr('column_type_formatters', 'list_type_formatters', None)
"""
Dictionary of value type formatters to be used in list view.
By default, two types are formatted:
1. ``None`` will be displayed as empty string
2. ``bool`` will be displayed as check if it is ``True``
If you don't like the default behavior and don't want any type formatters
applied, just override this property with an empty dictionary::
class MyModelView(BaseModelView):
column_type_formatters = dict()
If you want to display `NULL` instead of empty string, you can do
something like this::
from flask.ext.admin import typefmt
MY_DEFAULT_FORMATTERS = dict(typefmt.BASE_FORMATTERS)
MY_DEFAULT_FORMATTERS.update({
type(None): typefmt.null_formatter
})
class MyModelView(BaseModelView):
column_type_formatters = MY_DEFAULT_FORMATTERS
Type formatters have lower priority than list column formatters.
"""
column_labels = ObsoleteAttr('column_labels', 'rename_columns', None)
"""
Dictionary where key is column name and value is string to display.
For example::
class MyModelView(BaseModelView):
column_labels = dict(name='Name', last_name='Last Name')
"""
column_descriptions = None
"""
Dictionary where key is column name and
value is description for `list view` column or add/edit form field.
For example::
class MyModelView(BaseModelView):
column_descriptions = dict(
full_name='First and Last name'
)
"""
column_sortable_list = ObsoleteAttr('column_sortable_list',
'sortable_columns',
None)
"""
Collection of the sortable columns for the list view.
If set to `None`, will get them from the model.
For example::
class MyModelView(BaseModelView):
column_sortable_list = ('name', 'last_name')
If you want to explicitly specify field/column to be used while
sorting, you can use tuple::
class MyModelView(BaseModelView):
column_sortable_list = ('name', ('user', 'user.username'))
When using SQLAlchemy models, model attributes can be used instead
of the string::
class MyModelView(BaseModelView):
column_sortable_list = ('name', ('user', User.username))
"""
column_searchable_list = ObsoleteAttr('column_searchable_list',
'searchable_columns',
None)
"""
Collection of the searchable columns. It is assumed that only
text-only fields are searchable, but it is up to the model
implementation to make that decision.
Example::
class MyModelView(BaseModelView):
column_searchable_list = ('name', 'email')
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of :class:`~flask.ext.admin.model.filters.BaseFilter` classes.
Example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
"""
column_display_pk = ObsoleteAttr('column_display_pk',
'list_display_pk',
False)
"""
Controls if primary key should be displayed in list view.
"""
form = None
"""
Form class. Override if you want to use custom form for your model.
For example::
class MyForm(wtf.Form):
pass
class MyModelView(BaseModelView):
form = MyForm
"""
form_args = None
"""
Dictionary of form field arguments. Refer to WTForms documentation for
list of possible options.
Example::
class MyModelView(BaseModelView):
form_args = dict(
name=dict(label='First Name', validators=[wtf.required()])
)
"""
form_columns = None
"""
Collection of the model field names for the form. If set to `None` will
get them from the model.
Example::
class MyModelView(BaseModelView):
form_columns = ('name', 'email')
"""
form_excluded_columns = ObsoleteAttr('form_excluded_columns',
'excluded_form_columns',
None)
"""
Collection of excluded form field names.
For example::
class MyModelView(BaseModelView):
form_excluded_columns = ('last_name', 'email')
"""
form_overrides = None
"""
Dictionary of form column overrides.
Example::
class MyModelView(BaseModelView):
form_overrides = dict(name=wtf.FileField)
"""
# Actions
action_disallowed_list = ObsoleteAttr('action_disallowed_list',
'disallowed_actions',
[])
"""
Set of disallowed action names. For example, if you want to disable
mass model deletion, do something like this::
class MyModelView(BaseModelView):
action_disallowed_list = ['delete']
"""
# Various settings
page_size = 20
"""
Default page size.
"""
def __init__(self, model,
name=None, category=None, endpoint=None, url=None):
"""
Constructor.
:param model:
Model class
:param name:
View name. If not provided, will use model class name
:param category:
View category
:param endpoint:
Base endpoint. If not provided, will use model name + 'view'.
For example, if the model name is 'User', the endpoint will be
'userview'
:param url:
Base URL. If not provided, will use endpoint as a URL.
"""
# If name not provided, it is model name
if name is None:
name = '%s' % self._prettify_name(model.__name__)
# If endpoint not provided, it is model name + 'view'
if endpoint is None:
endpoint = ('%sview' % model.__name__).lower()
super(BaseModelView, self).__init__(name, category, endpoint, url)
self.model = model
# Actions
self.init_actions()
# Scaffolding
self._refresh_cache()
# Caching
def _refresh_cache(self):
"""
Refresh various cached variables.
"""
# List view
self._list_columns = self.get_list_columns()
self._sortable_columns = self.get_sortable_columns()
# Forms
self._create_form_class = self.get_create_form()
self._edit_form_class = self.get_edit_form()
# Search
self._search_supported = self.init_search()
# Filters
self._filters = self.get_filters()
# Type formatters
if self.column_type_formatters is None:
self.column_type_formatters = dict(typefmt.BASE_FORMATTERS)
if self.column_descriptions is None:
self.column_descriptions = dict()
if self._filters:
self._filter_groups = []
self._filter_dict = dict()
for i, n in enumerate(self._filters):
if n.name not in self._filter_dict:
group = []
self._filter_dict[n.name] = group
self._filter_groups.append((n.name, group))
else:
group = self._filter_dict[n.name]
group.append((i, n.operation()))
self._filter_types = dict((i, f.data_type)
for i, f in enumerate(self._filters)
if f.data_type)
else:
self._filter_groups = None
self._filter_types = None
# Primary key
def get_pk_value(self, model):
"""
Return PK value from a model object.
"""
raise NotImplementedError()
# List view
def scaffold_list_columns(self):
"""
Return list of the model field names. Must be implemented in
the child class.
Expected return format is a list of field names. For example::
['name', 'first_name', 'last_name']
"""
raise NotImplementedError('Please implement scaffold_list_columns method')
def get_column_name(self, field):
"""
Return human-readable column name.
:param field:
Model field name.
"""
if self.column_labels and field in self.column_labels:
return self.column_labels[field]
else:
return self.prettify_name(field)
def get_list_columns(self):
"""
Returns list of the model field names. If `column_list` was
set, returns it. Otherwise calls `scaffold_list_columns`
to generate list from the model.
"""
columns = self.column_list
if columns is None:
columns = self.scaffold_list_columns()
# Filter excluded columns
if self.column_exclude_list:
columns = [c for c in columns if c not in self.column_exclude_list]
return [(c, self.get_column_name(c)) for c in columns]
def scaffold_sortable_columns(self):
"""
Returns dictionary of sortable columns. Must be implemented in
the child class.
Expected return format is dictionary, where key is field name and
value is property name.
"""
raise NotImplementedError('Please implement scaffold_sortable_columns method')
def get_sortable_columns(self):
"""
Returns dictionary of the sortable columns. Key is a model
field name and value is sort column (for example - attribute).
If `column_sortable_list` is set, will use it. Otherwise, will call
`scaffold_sortable_columns` to get them from the model.
"""
if self.column_sortable_list is None:
return self.scaffold_sortable_columns() or dict()
else:
result = dict()
for c in self.column_sortable_list:
if isinstance(c, tuple):
result[c[0]] = c[1]
else:
result[c] = c
return result
def init_search(self):
"""
Initialize search. If data provider does not support search,
`init_search` will return `False`.
"""
return False
def scaffold_filters(self, name):
"""
Generate filter object for the given name
:param name:
Name of the field
"""
return None
def is_valid_filter(self, filter):
"""
Verify that provided filter object is valid.
Override in model backend implementation to verify if
provided filter type is allowed.
:param filter:
Filter object to verify.
"""
return isinstance(filter, filters.BaseFilter)
def get_filters(self):
"""
Return list of filter objects.
If your model backend implementation does not support filters,
override this method and return `None`.
"""
if self.column_filters:
collection = []
for n in self.column_filters:
if not self.is_valid_filter(n):
flt = self.scaffold_filters(n)
if flt:
collection.extend(flt)
else:
raise Exception('Unsupported filter type %s' % n)
else:
collection.append(n)
return collection
else:
return None
def scaffold_form(self):
"""
Create `form.BaseForm` inherited class from the model. Must be
implemented in the child class.
"""
raise NotImplementedError('Please implement scaffold_form method')
def get_form(self):
"""
Get form class.
If ``self.form`` is set, will return it and will call
``self.scaffold_form`` otherwise.
Override to implement customized behavior.
"""
if self.form is not None:
return self.form
return self.scaffold_form()
def get_create_form(self):
"""
Create form class for model creation view.
Override to implement customized behavior.
"""
return self.get_form()
def get_edit_form(self):
"""
Create form class for model editing view.
Override to implement customized behavior.
"""
return self.get_form()
def create_form(self, obj=None):
"""
Instantiate model creation form and return it.
Override to implement custom behavior.
"""
return self._create_form_class(obj=obj)
def edit_form(self, obj=None):
"""
Instantiate model editing form and return it.
Override to implement custom behavior.
"""
return self._edit_form_class(obj=obj)
# Helpers
def is_sortable(self, name):
"""
Verify if column is sortable.
:param name:
Column name.
"""
return name in self._sortable_columns
def _get_column_by_idx(self, idx):
"""
Return the ``(field name, display text)`` tuple for the list-view column at the given index, or `None` if the index is out of range.
"""
if idx is None or idx < 0 or idx >= len(self._list_columns):
return None
return self._list_columns[idx]
# Database-related API
def get_list(self, page, sort_field, sort_desc, search, filters):
"""
Return list of models from the data source with applied pagination
and sorting.
Must be implemented in child class.
:param page:
Page number, 0 based. Can be set to None if it is the first page.
:param sort_field:
Sort column name or None.
:param sort_desc:
If set to True, sorting is in descending order.
:param search:
Search query
:param filters:
List of filter tuples. First value in a tuple is a search
index, second value is a search value.
"""
raise NotImplementedError('Please implement get_list method')
def get_one(self, id):
"""
Return one model by its id.
Must be implemented in the child class.
:param id:
Model id
"""
raise NotImplementedError('Please implement get_one method')
# Model handlers
def on_model_change(self, form, model):
"""
Allows performing additional actions after a model is created or updated.
Called from create_model and update_model in the same transaction
(if that has any meaning for the store backend).
Does nothing by default.
"""
pass
def on_model_delete(self, model):
"""
Allows performing additional actions before a model is deleted.
Called from delete_model in the same transaction
(if that has any meaning for the store backend).
Does nothing by default.
"""
pass
def create_model(self, form):
"""
Create model from the form.
Returns `True` if operation succeeded.
Must be implemented in the child class.
:param form:
Form instance
"""
raise NotImplementedError()
def update_model(self, form, model):
"""
Update model from the form.
Returns `True` if operation succeeded.
Must be implemented in the child class.
:param form:
Form instance
:param model:
Model instance
"""
raise NotImplementedError()
def delete_model(self, model):
"""
Delete model.
Returns `True` if operation succeeded.
Must be implemented in the child class.
:param model:
Model instance
"""
raise NotImplementedError()
# Various helpers
def prettify_name(self, name):
"""
Prettify pythonic variable name.
For example, 'hello_world' will be converted to 'Hello World'
:param name:
Name to prettify
"""
return name.replace('_', ' ').title()
# URL generation helper
def _get_extra_args(self):
"""
Return arguments from query string.
"""
page = request.args.get('page', 0, type=int)
sort = request.args.get('sort', None, type=int)
sort_desc = request.args.get('desc', None, type=int)
search = request.args.get('search', None)
# Gather filters
if self._filters:
sfilters = []
for n in request.args:
if n.startswith('flt'):
ofs = n.find('_')
if ofs == -1:
continue
try:
pos = int(n[3:ofs])
idx = int(n[ofs + 1:])
except ValueError:
continue
if idx >= 0 and idx < len(self._filters):
flt = self._filters[idx]
value = request.args[n]
if flt.validate(value):
sfilters.append((pos, (idx, flt.clean(value))))
filters = [v[1] for v in sorted(sfilters, key=lambda n: n[0])]
else:
filters = None
return page, sort, sort_desc, search, filters
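# Illustrative sketch: a query string like "?page=2&sort=1&flt0_0=john" would
# yield page=2, sort=1, sort_desc=None, search=None and, provided filter 0
# accepts the value, filters=[(0, <cleaned value>)].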
def _get_url(self, view=None, page=None, sort=None, sort_desc=None,
search=None, filters=None):
"""
Generate page URL with current page, sort column and
other parameters.
:param view:
View name
:param page:
Page number
:param sort:
Sort column index
:param sort_desc:
Use descending sorting order
:param search:
Search query
:param filters:
List of active filters
"""
if not search:
search = None
if not page:
page = None
kwargs = dict(page=page, sort=sort, desc=sort_desc, search=search)
if filters:
for i, flt in enumerate(filters):
key = 'flt%d_%d' % (i, flt[0])
kwargs[key] = flt[1]
return url_for(view, **kwargs)
def is_action_allowed(self, name):
"""
Override this method to allow or disallow actions based
on some condition.
Default implementation only checks if particular action
is not in `action_disallowed_list`.
"""
return name not in self.action_disallowed_list
@contextfunction
def get_list_value(self, context, model, name):
"""
Returns value to be displayed in list view
:param context:
:py:class:`jinja2.runtime.Context`
:param model:
Model instance
:param name:
Field name
"""
column_fmt = self.column_formatters.get(name)
if column_fmt is not None:
return column_fmt(context, model, name)
value = rec_getattr(model, name)
type_fmt = self.column_type_formatters.get(type(value))
if type_fmt is not None:
value = type_fmt(value)
return value
# Views
@expose('/')
def index_view(self):
"""
List view
"""
# Grab parameters from URL
page, sort_idx, sort_desc, search, filters = self._get_extra_args()
# Map column index to column name
sort_column = self._get_column_by_idx(sort_idx)
if sort_column is not None:
sort_column = sort_column[0]
# Get count and data
count, data = self.get_list(page, sort_column, sort_desc,
search, filters)
# Calculate number of pages
num_pages = count / self.page_size
if count % self.page_size != 0:
num_pages += 1
# Pregenerate filters
if self._filters:
filters_data = dict()
for idx, f in enumerate(self._filters):
flt_data = f.get_options(self)
if flt_data:
filters_data[idx] = flt_data
else:
filters_data = None
# Various URL generation helpers
def pager_url(p):
# Do not add page number if it is first page
if p == 0:
p = None
return self._get_url('.index_view', p, sort_idx, sort_desc,
search, filters)
def sort_url(column, invert=False):
desc = None
if invert and not sort_desc:
desc = 1
return self._get_url('.index_view', page, column, desc,
search, filters)
# Actions
actions, actions_confirmation = self.get_actions_list()
return self.render(self.list_template,
data=data,
# List
list_columns=self._list_columns,
sortable_columns=self._sortable_columns,
# Stuff
enumerate=enumerate,
get_pk_value=self.get_pk_value,
get_value=self.get_list_value,
return_url=self._get_url('.index_view',
page,
sort_idx,
sort_desc,
search,
filters),
# Pagination
count=count,
pager_url=pager_url,
num_pages=num_pages,
page=page,
# Sorting
sort_column=sort_idx,
sort_desc=sort_desc,
sort_url=sort_url,
# Search
search_supported=self._search_supported,
clear_search_url=self._get_url('.index_view',
None,
sort_idx,
sort_desc),
search=search,
# Filters
filters=self._filters,
filter_groups=self._filter_groups,
filter_types=self._filter_types,
filter_data=filters_data,
active_filters=filters,
# Actions
actions=actions,
actions_confirmation=actions_confirmation
)
@expose('/new/', methods=('GET', 'POST'))
def create_view(self):
"""
Create model view
"""
return_url = request.args.get('url') or url_for('.index_view')
if not self.can_create:
return redirect(return_url)
form = self.create_form()
if form.validate_on_submit():
if self.create_model(form):
if '_add_another' in request.form:
flash(gettext('Model was successfully created.'))
return redirect(url_for('.create_view', url=return_url))
else:
return redirect(return_url)
return self.render(self.create_template,
form=form,
return_url=return_url)
@expose('/edit/', methods=('GET', 'POST'))
def edit_view(self):
"""
Edit model view
"""
return_url = request.args.get('url') or url_for('.index_view')
if not self.can_edit:
return redirect(return_url)
id = request.args.get('id')
if id is None:
return redirect(return_url)
model = self.get_one(id)
if model is None:
return redirect(return_url)
form = self.edit_form(obj=model)
if form.validate_on_submit():
if self.update_model(form, model):
return redirect(return_url)
return self.render(self.edit_template,
form=form,
return_url=return_url)
@expose('/delete/', methods=('POST',))
def delete_view(self):
"""
Delete model view. Only POST method is allowed.
"""
return_url = request.args.get('url') or url_for('.index_view')
# TODO: Use post
if not self.can_delete:
return redirect(return_url)
id = request.args.get('id')
if id is None:
return redirect(return_url)
model = self.get_one(id)
if model:
self.delete_model(model)
return redirect(return_url)
@expose('/action/', methods=('POST',))
def action_view(self):
"""
Mass-model action view.
"""
return self.handle_action()
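

# Note on the filter query-string convention used by _get_extra_args and
# _get_url above: each active filter travels as flt<position>_<index>=<value>,
# where <position> preserves the order the filters were applied and <index>
# points into self._filters. The sketch below is a hypothetical, standalone
# illustration of that parsing (it works on a plain dict rather than Flask's
# request.args).
def _demo_parse_filter_args(args, num_filters):
    """Return (index, raw_value) pairs parsed from a dict of query arguments."""
    parsed = []
    for name, value in args.items():
        if not name.startswith('flt'):
            continue
        ofs = name.find('_')
        if ofs == -1:
            continue
        try:
            pos = int(name[3:ofs])
            idx = int(name[ofs + 1:])
        except ValueError:
            continue
        if 0 <= idx < num_filters:
            parsed.append((pos, idx, value))
    return [(idx, value) for _, idx, value in sorted(parsed)]


# Example: _demo_parse_filter_args({'flt0_2': 'abc', 'flt1_0': '5', 'page': '1'}, 3)
# returns [(2, 'abc'), (0, '5')].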
|
|
# -*- coding: utf-8 -*-
import unittest
import datetime
from dateutil.relativedelta import relativedelta
from trytond.tests.test_tryton import DB_NAME, USER, CONTEXT, POOL
import trytond.tests.test_tryton
from trytond.transaction import Transaction
from trytond.exceptions import UserError
class TestTransaction(unittest.TestCase):
"""
Test transaction
"""
def setUp(self):
"""
Set up data used in the tests.
"""
trytond.tests.test_tryton.install_module('payment_gateway')
self.Currency = POOL.get('currency.currency')
self.Company = POOL.get('company.company')
self.Party = POOL.get('party.party')
self.User = POOL.get('res.user')
self.Journal = POOL.get('account.journal')
self.PaymentGateway = POOL.get('payment_gateway.gateway')
self.PaymentGatewayTransaction = POOL.get('payment_gateway.transaction')
self.AccountMove = POOL.get('account.move')
def _create_fiscal_year(self, date=None, company=None):
"""
        Creates a fiscal year and required sequences
"""
FiscalYear = POOL.get('account.fiscalyear')
Sequence = POOL.get('ir.sequence')
Company = POOL.get('company.company')
if date is None:
date = datetime.date.today()
if company is None:
company, = Company.search([], limit=1)
fiscal_year, = FiscalYear.create([{
'name': '%s' % date.year,
'start_date': date + relativedelta(month=1, day=1),
'end_date': date + relativedelta(month=12, day=31),
'company': company,
'post_move_sequence': Sequence.create([{
'name': '%s' % date.year,
'code': 'account.move',
'company': company,
}])[0],
}])
FiscalYear.create_period([fiscal_year])
return fiscal_year
def _create_coa_minimal(self, company):
"""Create a minimal chart of accounts
"""
AccountTemplate = POOL.get('account.account.template')
Account = POOL.get('account.account')
account_create_chart = POOL.get(
'account.create_chart', type="wizard")
account_template, = AccountTemplate.search(
[('parent', '=', None)]
)
session_id, _, _ = account_create_chart.create()
create_chart = account_create_chart(session_id)
create_chart.account.account_template = account_template
create_chart.account.company = company
create_chart.transition_create_account()
receivable, = Account.search([
('kind', '=', 'receivable'),
('company', '=', company),
])
payable, = Account.search([
('kind', '=', 'payable'),
('company', '=', company),
])
create_chart.properties.company = company
create_chart.properties.account_receivable = receivable
create_chart.properties.account_payable = payable
create_chart.transition_create_properties()
def _get_account_by_kind(self, kind, company=None, silent=True):
"""Returns an account with given spec
:param kind: receivable/payable/expense/revenue
        :param silent: don't raise an error if the account is not found
"""
Account = POOL.get('account.account')
Company = POOL.get('company.company')
if company is None:
company, = Company.search([], limit=1)
accounts = Account.search([
('kind', '=', kind),
('company', '=', company)
], limit=1)
if not accounts and not silent:
raise Exception("Account not found")
if not accounts:
return None
account, = accounts
return account
def setup_defaults(self):
"""
Creates default data for testing
"""
currency, = self.Currency.create([{
'name': 'US Dollar',
'code': 'USD',
'symbol': '$',
}])
with Transaction().set_context(company=None):
company_party, = self.Party.create([{
'name': 'Openlabs'
}])
self.company, = self.Company.create([{
'party': company_party,
'currency': currency,
}])
self.User.write([self.User(USER)], {
'company': self.company,
'main_company': self.company,
})
CONTEXT.update(self.User.get_preferences(context_only=True))
# Create Fiscal Year
self._create_fiscal_year(company=self.company.id)
# Create Chart of Accounts
self._create_coa_minimal(company=self.company.id)
# Create Cash journal
self.cash_journal, = self.Journal.search(
[('type', '=', 'cash')], limit=1
)
self.Journal.write([self.cash_journal], {
'debit_account': self._get_account_by_kind('expense').id
})
# Create a party
self.party, = self.Party.create([{
'name': 'Test party',
'addresses': [('create', [{
'name': 'Test Party',
'street': 'Test Street',
'city': 'Test City',
}])],
'account_receivable': self._get_account_by_kind(
'receivable').id,
}])
def test_0010_test_manual_transaction(self):
"""
Test manual transaction
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
gateway, = self.PaymentGateway.create([{
'name': 'Test Gateway',
'journal': self.cash_journal.id,
'provider': 'self',
'method': 'manual',
}])
with Transaction().set_context({'company': self.company.id}):
transaction, = self.PaymentGatewayTransaction.create([{
'party': self.party.id,
'credit_account': self.party.account_receivable.id,
'address': self.party.addresses[0].id,
'gateway': gateway.id,
'amount': 400,
}])
self.assert_(transaction)
# Process transaction
self.PaymentGatewayTransaction.process([transaction])
# Assert that transaction state is completed
self.assertEqual(transaction.state, 'completed')
# Assert that there are no account moves
                self.assertEqual(self.AccountMove.search([], count=True), 0)
# Post transaction
self.PaymentGatewayTransaction.post([transaction])
# Assert that the transaction is done
self.assertEqual(transaction.state, 'posted')
# Assert that an account move is created
                self.assertEqual(self.AccountMove.search([], count=True), 1)
self.assertEqual(self.party.receivable_today, -400)
self.assertEqual(self.cash_journal.debit_account.balance, 400)
def test_0210_test_dummy_gateway(self):
"""
        Test that processing a dummy gateway transaction raises a UserError
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context(
company=self.company.id, use_dummy=True):
gateway, = self.PaymentGateway.create([{
'name': 'Dummy Gateway',
'journal': self.cash_journal.id,
'provider': 'dummy',
'method': 'credit_card',
}])
transaction, = self.PaymentGatewayTransaction.create([{
'party': self.party.id,
'credit_account': self.party.account_receivable.id,
'address': self.party.addresses[0].id,
'gateway': gateway.id,
'amount': 400,
}])
self.assert_(transaction)
# Process transaction
with self.assertRaises(UserError):
self.PaymentGatewayTransaction.process([transaction])
def test_0220_test_dummy_gateway(self):
"""
        Test dummy gateway transaction authorize and settle flow
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context(
company=self.company.id, use_dummy=True):
gateway, = self.PaymentGateway.create([{
'name': 'Dummy Gateway',
'journal': self.cash_journal.id,
'provider': 'dummy',
'method': 'credit_card',
}])
transaction, = self.PaymentGatewayTransaction.create([{
'party': self.party.id,
'credit_account': self.party.account_receivable.id,
'address': self.party.addresses[0].id,
'gateway': gateway.id,
'amount': 400,
}])
self.assert_(transaction)
# Now authorize and capture a transaction with this
self.PaymentGatewayTransaction.authorize([transaction])
self.assertEqual(transaction.state, 'authorized')
# Now settle this transaction
self.PaymentGatewayTransaction.settle([transaction])
self.assertEqual(transaction.state, 'posted')
# Assert that an account move is created
                self.assertEqual(self.AccountMove.search([], count=True), 1)
self.assertEqual(self.party.receivable_today, -400)
self.assertEqual(self.cash_journal.debit_account.balance, 400)
def test_0220_test_dummy_profile_add(self):
"""
Test dummy gateway profile addition
"""
AddPaymentProfileWizard = POOL.get(
'party.party.payment_profile.add', type='wizard'
)
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context(
company=self.company.id, use_dummy=True):
gateway, = self.PaymentGateway.create([{
'name': 'Dummy Gateway',
'journal': self.cash_journal.id,
'provider': 'dummy',
'method': 'credit_card',
}])
# create a profile
profile_wiz = AddPaymentProfileWizard(
AddPaymentProfileWizard.create()[0]
)
profile_wiz.card_info.party = self.party.id
profile_wiz.card_info.address = self.party.addresses[0].id
profile_wiz.card_info.provider = gateway.provider
profile_wiz.card_info.gateway = gateway
profile_wiz.card_info.owner = self.party.name
profile_wiz.card_info.number = '4111111111111111'
profile_wiz.card_info.expiry_month = '11'
profile_wiz.card_info.expiry_year = '2018'
profile_wiz.card_info.csc = '353'
profile_wiz.transition_add()
def test_0220_test_dummy_gateway_authorize_fail(self):
"""
Test dummy gateway transaction for authorization failure
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context(
company=self.company.id, use_dummy=True):
gateway, = self.PaymentGateway.create([{
'name': 'Dummy Gateway',
'journal': self.cash_journal.id,
'provider': 'dummy',
'method': 'credit_card',
}])
transaction, = self.PaymentGatewayTransaction.create([{
'party': self.party.id,
'credit_account': self.party.account_receivable.id,
'address': self.party.addresses[0].id,
'gateway': gateway.id,
'amount': 400,
}])
self.assert_(transaction)
with Transaction().set_context(dummy_succeed=False):
# Now authorize and capture a transaction with this
self.PaymentGatewayTransaction.authorize([transaction])
self.assertEqual(transaction.state, 'failed')
def test_0220_test_dummy_gateway_capture(self):
"""
        Test dummy gateway transaction capture
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context(
company=self.company.id, use_dummy=True):
gateway, = self.PaymentGateway.create([{
'name': 'Dummy Gateway',
'journal': self.cash_journal.id,
'provider': 'dummy',
'method': 'credit_card',
}])
transaction, = self.PaymentGatewayTransaction.create([{
'party': self.party.id,
'credit_account': self.party.account_receivable.id,
'address': self.party.addresses[0].id,
'gateway': gateway.id,
'amount': 400,
}])
self.assert_(transaction)
self.PaymentGatewayTransaction.capture([transaction])
self.assertEqual(transaction.state, 'posted')
# Assert that an account move is created
                self.assertEqual(self.AccountMove.search([], count=True), 1)
self.assertEqual(self.party.receivable_today, -400)
self.assertEqual(self.cash_journal.debit_account.balance, 400)
def test_0220_test_dummy_gateway_capture_fail(self):
"""
        Test dummy gateway transaction capture failure
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context(
company=self.company.id, use_dummy=True):
gateway, = self.PaymentGateway.create([{
'name': 'Dummy Gateway',
'journal': self.cash_journal.id,
'provider': 'dummy',
'method': 'credit_card',
}])
transaction, = self.PaymentGatewayTransaction.create([{
'party': self.party.id,
'credit_account': self.party.account_receivable.id,
'address': self.party.addresses[0].id,
'gateway': gateway.id,
'amount': 400,
}])
self.assert_(transaction)
with Transaction().set_context(dummy_succeed=False):
self.PaymentGatewayTransaction.capture([transaction])
self.assertEqual(transaction.state, 'failed')
def test_0230_manual_gateway_auth_settle(self):
"""
Test authorize and capture with the manual payment gateway
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
gateway, = self.PaymentGateway.create([{
'name': 'Test Gateway',
'journal': self.cash_journal.id,
'provider': 'self',
'method': 'manual',
}])
with Transaction().set_context({'company': self.company.id}):
transaction, = self.PaymentGatewayTransaction.create([{
'party': self.party.id,
'credit_account': self.party.account_receivable.id,
'address': self.party.addresses[0].id,
'gateway': gateway.id,
'amount': 400,
}])
self.assert_(transaction)
# Process transaction
self.PaymentGatewayTransaction.authorize([transaction])
self.assertEqual(transaction.state, 'authorized')
# Assert that an account move is **not** created
                self.assertEqual(self.AccountMove.search([], count=True), 0)
self.assertEqual(self.party.receivable_today, 0)
self.assertEqual(self.cash_journal.debit_account.balance, 0)
# Capture transaction
self.PaymentGatewayTransaction.settle([transaction])
transaction = self.PaymentGatewayTransaction(transaction.id)
# Assert that the transaction is done
self.assertEqual(transaction.state, 'posted')
# Assert that an account move is created
                self.assertEqual(self.AccountMove.search([], count=True), 1)
self.assertEqual(self.party.receivable_today, -400)
self.assertEqual(self.cash_journal.debit_account.balance, 400)
def test_0240_manual_gateway_capture(self):
"""
Test authorize and capture with the manual payment gateway
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
gateway, = self.PaymentGateway.create([{
'name': 'Test Gateway',
'journal': self.cash_journal.id,
'provider': 'self',
'method': 'manual',
}])
with Transaction().set_context({'company': self.company.id}):
transaction, = self.PaymentGatewayTransaction.create([{
'party': self.party.id,
'credit_account': self.party.account_receivable.id,
'address': self.party.addresses[0].id,
'gateway': gateway.id,
'amount': 400,
}])
self.assert_(transaction)
# Process transaction
self.PaymentGatewayTransaction.capture([transaction])
self.assertEqual(transaction.state, 'posted')
# Assert that an account move is created
                self.assertEqual(self.AccountMove.search([], count=True), 1)
self.assertEqual(self.party.receivable_today, -400)
self.assertEqual(self.cash_journal.debit_account.balance, 400)
def test_0250_gateway_configuration(self):
"""
Test the configuration of payment gateway
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
gateway, = self.PaymentGateway.create([{
'name': 'Test Gateway',
'journal': self.cash_journal.id,
'provider': 'self',
'method': 'manual',
}])
self.PaymentGateway.test_gateway_configuration([gateway])
self.assertTrue(gateway.configured)
            # Mark party required on the journal's debit account and check if
# configuration is wrong
account = self.cash_journal.debit_account
account.party_required = True
account.save()
self.PaymentGateway.test_gateway_configuration([gateway])
self.assertFalse(gateway.configured)
def test_0260_test_dummy_delete_move(self):
"""
        Test that the account move is deleted if the transaction fails to post
using safe post
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context(
company=self.company.id, use_dummy=True):
gateway, = self.PaymentGateway.create([{
'name': 'Dummy Gateway',
'journal': self.cash_journal.id,
'provider': 'dummy',
'method': 'credit_card',
}])
# Mark party required so that move does not post
account = self.cash_journal.debit_account
account.party_required = True
account.save()
transaction, = self.PaymentGatewayTransaction.create([{
'party': self.party.id,
'credit_account': self.party.account_receivable.id,
'address': self.party.addresses[0].id,
'gateway': gateway.id,
'amount': 400,
}])
self.assert_(transaction)
self.PaymentGatewayTransaction.capture([transaction])
# Test if transaction failed to post
self.assertEqual(transaction.state, 'completed')
# Check that the account move was deleted
self.assertEqual(len(transaction.logs), 1)
self.assertTrue(
"Deleted account move" in transaction.logs[0].log)
def suite():
"Define suite"
test_suite = trytond.tests.test_tryton.suite()
test_suite.addTests(
unittest.TestLoader().loadTestsFromTestCase(TestTransaction)
)
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import callbacks_v1
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import adam
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class TestTensorBoardV1(test.TestCase):
@test_util.run_deprecated_v1
def test_TensorBoard(self):
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
# case: Sequential
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = callbacks_v1.TensorBoard(
log_dir=temp_dir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
# fit with validation data and accuracy
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
# fit generator with validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data
# histogram_freq must be zero
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=cbks,
verbose=0)
# fit generator with validation data and accuracy
tsb.histogram_freq = 1
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data and accuracy
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbks)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
else:
yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
i += 1
i %= max_batch_index
inp1 = keras.Input((INPUT_DIM,))
inp2 = keras.Input((INPUT_DIM,))
inp = keras.layers.add([inp1, inp2])
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model([inp1, inp2], [output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [
callbacks_v1.TensorBoard(
log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True,
write_grads=True,
batch_size=5)
]
# fit without validation data
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_in_test_function(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.steps_seen = []
def add_summary(self, summary, global_step):
summary_obj = summary_pb2.Summary()
# ensure a valid Summary proto is being sent
if isinstance(summary, bytes):
summary_obj.ParseFromString(summary)
else:
assert isinstance(summary, summary_pb2.Summary)
summary_obj = summary
# keep track of steps seen for the merged_summary op,
# which contains the histogram summaries
if len(summary_obj.value) > 1:
self.steps_seen.append(global_step)
def flush(self):
pass
def close(self):
pass
def _init_writer(obj, _):
obj.writer = FileWriterStub(obj.log_dir)
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
callbacks_v1.TensorBoard._init_writer = _init_writer
tsb = callbacks_v1.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_with_generator(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
def generator():
x = np.random.randn(10, 100).astype(np.float32)
y = np.random.randn(10, 10).astype(np.float32)
while True:
yield x, y
with self.cached_session():
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=10, input_dim=100)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = callbacks_v1.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation generator
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
validation_steps=2,
callbacks=cbks,
verbose=0)
with self.assertRaises(ValueError):
# fit with validation generator but no
# validation_steps
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
callbacks=cbks,
verbose=0)
self.assertTrue(os.path.exists(tmpdir))
def test_TensorBoard_with_ReduceLROnPlateau(self):
with self.cached_session():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.5, patience=4, verbose=1),
callbacks_v1.TensorBoard(log_dir=temp_dir)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_Tensorboard_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batches_logged = []
self.summary_values = []
self.summary_tags = []
def add_summary(self, summary, step):
self.summary_values.append(summary.value[0].simple_value)
self.summary_tags.append(summary.value[0].tag)
self.batches_logged.append(step)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
for batch in range(5):
tb_cbk.on_batch_end(batch, {'acc': batch})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
@test_util.run_deprecated_v1
def test_Tensorboard_epoch_and_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summary = (step, summary)
elif 'epoch_' in summary.value[0].tag:
self.epoch_summary = (step, summary)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0})
tb_cbk.on_train_end()
batch_step, batch_summary = tb_cbk.writer.batch_summary
self.assertEqual(batch_step, 0)
self.assertEqual(batch_summary.value[0].simple_value, 5.0)
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_epoch_end(0, {'acc': 10.0})
tb_cbk.on_train_end()
epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
self.assertEqual(epoch_step, 0)
self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
@test_util.run_in_graph_and_eager_modes
def test_Tensorboard_eager(self):
temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy',
optimizer=adam.AdamOptimizer(0.01),
metrics=['accuracy'])
cbks = [callbacks_v1.TensorBoard(log_dir=temp_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertTrue(os.path.exists(temp_dir))
@test_util.run_deprecated_v1
def test_TensorBoard_update_freq(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batch_summaries = []
self.epoch_summaries = []
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summaries.append((step, summary))
elif 'epoch_' in summary.value[0].tag:
self.epoch_summaries.append((step, summary))
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
# Epoch mode
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(tb_cbk.writer.batch_summaries, [])
tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.epoch_summaries), 1)
tb_cbk.on_train_end()
# Batch mode
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
tb_cbk.on_train_end()
# Integer mode
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq=20)
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertFalse(tb_cbk.writer.batch_summaries)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
tb_cbk.on_train_end()
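

# A hedged sketch of the sample-count bookkeeping that
# test_TensorBoard_update_freq exercises when update_freq is an integer:
# batch summaries are written only once the number of samples seen since the
# last write reaches update_freq. This is a standalone illustration of the
# behaviour the assertions above rely on, not the TensorBoard callback itself.
class _DemoSampleCounter(object):

  def __init__(self, update_freq):
    self.update_freq = update_freq
    self.samples_seen = 0
    self.samples_seen_at_last_write = 0
    self.writes = 0

  def on_batch_end(self, logs):
    self.samples_seen += logs.get('size', 1)
    if self.samples_seen - self.samples_seen_at_last_write >= self.update_freq:
      self.writes += 1
      self.samples_seen_at_last_write = self.samples_seen


# counter = _DemoSampleCounter(update_freq=20)
# for _ in range(5):
#   counter.on_batch_end({'acc': 5.0, 'size': 10})
# counter.writes == 2, matching the two batch summaries asserted above.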
if __name__ == '__main__':
test.main()
|
|
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import re
import os
import yaml
import random
import logging
import subprocess
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
import numpy as np
from monty.io import zopen
from monty.os.path import which
from monty.tempfile import ScratchDir
from monty.serialization import loadfn
from pymatgen import Structure, Lattice, Element
from veidt.potential.abstract import Potential
from veidt.potential.processing import pool_from, convert_docs
from veidt.potential.lammps.calcs import EnergyForceStress
module_dir = os.path.dirname(__file__)
soap_params = loadfn(os.path.join(module_dir, 'params', 'soap.json'))
class SOAPotential(Potential):
"""
    This class implements the Smooth Overlap of Atomic Positions (SOAP) potential.
"""
pair_style = 'pair_style quip'
pair_coeff = 'pair_coeff * * {} {} {}'
def __init__(self, name=None, param=None):
"""
Args:
name (str): Name of force field.
param (dict): The parameter configuration of potential.
"""
self.name = name if name else "SOAPotential"
self.param = param if param else {}
self.specie = None
def _line_up(self, structure, energy, forces, virial_stress):
"""
Convert input structure, energy, forces, virial_stress to
proper configuration format for MLIP usage.
Args:
structure (Structure): Pymatgen Structure object.
energy (float): DFT-calculated energy of the system.
forces (list): The forces should have dimension
(num_atoms, 3).
            virial_stress (list): The stress should have 6 distinct
elements arranged in order [xx, yy, zz, xy, yz, xz].
        Returns:
            str: The configuration block in extended XYZ format.
"""
full_virial_stress = [virial_stress[0], virial_stress[3], virial_stress[5],
virial_stress[3], virial_stress[1], virial_stress[4],
virial_stress[5], virial_stress[4], virial_stress[2]]
inputs = OrderedDict(Size=structure.num_sites,
SuperCell=structure.lattice,
AtomData=(structure, forces),
Energy=energy,
Stress=full_virial_stress)
lines = []
if 'Size' in inputs:
lines.append(str(inputs['Size']))
description = []
if 'Energy' in inputs:
description.append('dft_energy={}'.format(inputs['Energy']))
if 'Stress' in inputs:
description.append('dft_virial={%s}' %
'\t'.join(list(map(lambda f: str(f), inputs['Stress']))))
if 'SuperCell' in inputs:
SuperCell_str = list(map(lambda f: str(f), inputs['SuperCell'].matrix.ravel()))
description.append('Lattice="{}"'.format(' '.join(SuperCell_str)))
description.append("Properties=species:S:1:pos:R:3:Z:I:1:dft_force:R:3")
lines.append(' '.join(description))
if 'AtomData' in inputs:
format_str = '{:<10s}{:>16f}{:>16f}{:>16f}{:>8d}{:>16f}{:>16f}{:>16f}'
for i, (site, force) in enumerate(zip(structure, forces)):
lines.append(format_str.format(site.species_string,
*site.coords, site.specie.Z, *force))
return '\n'.join(lines)
def write_cfgs(self, filename, cfg_pool):
if not filename.endswith('.xyz'):
raise RuntimeError('The extended xyz file should end with ".xyz"')
lines = []
for dataset in cfg_pool:
if isinstance(dataset['structure'], dict):
structure = Structure.from_dict(dataset['structure'])
else:
structure = dataset['structure']
energy = dataset['outputs']['energy']
forces = dataset['outputs']['forces']
virial_stress = dataset['outputs']['virial_stress']
lines.append(self._line_up(structure, energy, forces, virial_stress))
self.specie = Element(structure.symbol_set[0])
with open(filename, 'w') as f:
f.write('\n'.join(lines))
return filename
def read_cfgs(self, filename, predict=False):
"""
Args:
filename (str): The configuration file to be read.
"""
type_convert = {'R': np.float32, 'I': np.int, 'S': np.str}
data_pool = []
with zopen(filename, 'rt') as f:
lines = f.read()
repl = re.compile('AT ')
lines = repl.sub('', string=lines)
block_pattern = re.compile('(\n[0-9]+\n|^[0-9]+\n)(.+?)(?=\n[0-9]+\n|$)', re.S)
lattice_pattern = re.compile('Lattice="(.+)"')
# energy_pattern = re.compile('dft_energy=(-?[0-9]+.[0-9]+)', re.I)
energy_pattern = re.compile(r'(?<=\S{3}\s|dft_)energy=(-?[0-9]+.[0-9]+)')
# stress_pattern = re.compile('dft_virial={(.+)}')
stress_pattern = re.compile(r'dft_virial=({|)(.+?)(}|) \S.*')
properties_pattern = re.compile(r'properties=(\S+)', re.I)
# position_pattern = re.compile('\n(.+)', re.S)
position_pattern = re.compile('\n(.+?)(?=\nE.*|\n\n.*|$)', re.S)
# formatify = lambda string: [float(s) for s in string.split()]
for (size, block) in block_pattern.findall(lines):
d = {'outputs': {}}
size = int(size)
lattice_str = lattice_pattern.findall(block)[0]
lattice = Lattice(list(map(lambda s: float(s), lattice_str.split())))
# energy_str = energy_pattern.findall(block)[0]
energy_str = energy_pattern.findall(block)[-1]
energy = float(energy_str)
# stress_str = stress_pattern.findall(block)[0]
stress_str = stress_pattern.findall(block)[0][1]
virial_stress = np.array(list(map(lambda s: float(s), stress_str.split())))
virial_stress = [virial_stress[i] for i in [0, 4, 8, 1, 5, 6]]
properties = properties_pattern.findall(block)[0].split(":")
labels_columns = OrderedDict()
labels = defaultdict()
for i in range(0, len(properties), 3):
labels_columns[properties[i]] = [int(properties[i + 2]), properties[i + 1]]
position_str = position_pattern.findall(block)[0].split('\n')
position = np.array([p.split() for p in position_str])
column_index = 0
for key in labels_columns:
num_columns, dtype = labels_columns[key]
labels[key] = position[:, column_index: column_index + num_columns].astype(type_convert[dtype])
column_index += num_columns
struct = Structure(lattice=lattice, species=labels['species'].ravel(),
coords=labels['pos'], coords_are_cartesian=True)
if predict:
forces = labels['force']
else:
forces = labels['dft_force']
d['structure'] = struct.as_dict()
d['outputs']['energy'] = energy
assert size == struct.num_sites
d['num_atoms'] = size
d['outputs']['forces'] = forces
d['outputs']['virial_stress'] = virial_stress
data_pool.append(d)
_, df = convert_docs(docs=data_pool)
return data_pool, df
def train(self, train_structures, energies=None, forces=None, stresses=None,
default_sigma=[0.0005, 0.1, 0.05, 0.01],
use_energies=True, use_forces=True, use_stress=False, **kwargs):
"""
Training data with gaussian process regression.
Args:
train_structures ([Structure]): The list of Pymatgen Structure object.
            energies ([float]): The list of total energies of each structure
                in structures list.
forces ([np.array]): List of (m, 3) forces array of each structure
with m atoms in structures list. m can be varied with each
single structure case.
stresses (list): List of (6, ) virial stresses of each
structure in structures list.
default_sigma (list): Error criteria in energies, forces, stress
and hessian. Should have 4 numbers.
use_energies (bool): Whether to use dft total energies for training.
Default to True.
use_forces (bool): Whether to use dft atomic forces for training.
Default to True.
use_stress (bool): Whether to use dft virial stress for training.
Default to False.
kwargs:
l_max (int): Parameter to configure GAP. The band limit of
spherical harmonics basis function. Default to 12.
n_max (int): Parameter to configure GAP. The number of radial basis
function. Default to 10.
atom_sigma (float): Parameter to configure GAP. The width of gaussian
atomic density. Default to 0.5.
                zeta (float): Present when covariance function type is dot product.
Default to 4.
cutoff (float): Parameter to configure GAP. The cutoff radius.
Default to 4.0.
cutoff_transition_width (float): Parameter to configure GAP.
The transition width of cutoff radial. Default to 0.5.
delta (float): Parameter to configure Sparsification.
The signal variance of noise. Default to 1.
f0 (float): Parameter to configure Sparsification.
The signal mean of noise. Default to 0.0.
n_sparse (int): Parameter to configure Sparsification.
Number of sparse points.
covariance_type (str): Parameter to configure Sparsification.
                    The type of covariance function. Default to dot_product.
sparse_method (str): Method to perform clustering in sparsification.
Default to 'cur_points'.
                sparse_jitter (float): Intrinsic error of atomic/bond energy,
used to regularise the sparse covariance matrix.
Default to 1e-8.
e0 (float): Atomic energy value to be subtracted from energies
before fitting. Default to 0.0.
e0_offset (float): Offset of baseline. If zero, the offset is
the average atomic energy of the input data or the e0
specified manually. Default to 0.0.
"""
if not which('teach_sparse'):
raise RuntimeError("teach_sparse has not been found.\n",
"Please refer to https://github.com/libAtoms/QUIP for ",
"further detail.")
atoms_filename = 'train.xyz'
xml_filename = 'train.xml'
train_pool = pool_from(train_structures, energies, forces, stresses)
exe_command = ["teach_sparse"]
exe_command.append('at_file={}'.format(atoms_filename))
gap_configure_params = ['l_max', 'n_max', 'atom_sigma', 'zeta', 'cutoff',
'cutoff_transition_width', 'delta', 'f0', 'n_sparse',
'covariance_type', 'sparse_method']
preprocess_params = ['sparse_jitter', 'e0', 'e0_offset']
target_for_training = ['use_energies', 'use_forces', 'use_stress']
if len(default_sigma) != 4:
raise ValueError("The default sigma is supposed to have 4 numbers.")
gap_command = ['soap']
for param_name in gap_configure_params:
param = kwargs.get(param_name) if kwargs.get(param_name) \
else soap_params.get(param_name)
gap_command.append(param_name + '=' + '{}'.format(param))
exe_command.append("gap=" + "{" + "{}".format(' '.join(gap_command)) + "}")
for param_name in preprocess_params:
param = kwargs.get(param_name) if kwargs.get(param_name) \
else soap_params.get(param_name)
exe_command.append(param_name + '=' + '{}'.format(param))
default_sigma = [str(f) for f in default_sigma]
exe_command.append("default_sigma={%s}" % (' '.join(default_sigma)))
if use_energies:
exe_command.append('energy_parameter_name=dft_energy')
if use_forces:
exe_command.append('force_parameter_name=dft_force')
if use_stress:
exe_command.append('virial_parameter_name=dft_virial')
exe_command.append('gp_file={}'.format(xml_filename))
with ScratchDir('.'):
self.write_cfgs(filename=atoms_filename, cfg_pool=train_pool)
p = subprocess.Popen(exe_command, stdout=subprocess.PIPE)
stdout = p.communicate()[0]
rc = p.returncode
if rc != 0:
error_msg = 'QUIP exited with return code %d' % rc
msg = stdout.decode("utf-8").split('\n')[:-1]
try:
error_line = [i for i, m in enumerate(msg)
if m.startswith('ERROR')][0]
error_msg += ', '.join([e for e in msg[error_line:]])
except Exception:
error_msg += msg[-1]
raise RuntimeError(error_msg)
def get_xml(xml_file):
tree = ET.parse(xml_file)
root = tree.getroot()
potential_label = root.tag
gpcoordinates = list(root.iter('gpCoordinates'))[0]
param_file = gpcoordinates.get('sparseX_filename')
param = np.loadtxt(param_file)
return tree, param, potential_label
tree, param, potential_label = get_xml(xml_filename)
self.param['xml'] = tree
self.param['param'] = param
self.param['potential_label'] = potential_label
return rc
def write_param(self, xml_filename='soap.xml'):
"""
Write xml file to perform lammps calculation.
Args:
xml_filename (str): Filename to store xml formatted parameters.
"""
if not self.param:
raise RuntimeError("The xml and parameters should be provided.")
tree = self.param.get('xml')
root = tree.getroot()
gpcoordinates = list(root.iter('gpCoordinates'))[0]
param_filename = "{}.soapparam".format(self.name)
gpcoordinates.set('sparseX_filename', param_filename)
np.savetxt(param_filename, self.param.get('param'))
tree.write(xml_filename)
pair_coeff = self.pair_coeff.format(xml_filename,
'\"Potential xml_label={}\"'.format(self.param.get('potential_label')),
self.specie.Z)
ff_settings = [self.pair_style, pair_coeff]
return ff_settings
def evaluate(self, test_structures, ref_energies=None, ref_forces=None,
ref_stresses=None, predict_energies=True,
predict_forces=True, predict_stress=False):
"""
Evaluate energies, forces and stresses of structures with trained
interatomic potential.
Args:
test_structures ([Structure]): List of Pymatgen Structure Objects.
ref_energies ([float]): List of DFT-calculated total energies of
each structure in structures list.
ref_forces ([np.array]): List of DFT-calculated (m, 3) forces of
each structure with m atoms in structures list. m can be varied
with each single structure case.
            ref_stresses (list): List of DFT-calculated (6, ) virial stresses
of each structure in structures list.
predict_energies (bool): Whether to predict energies of configurations.
predict_forces (bool): Whether to predict forces of configurations.
predict_stress (bool): Whether to predict virial stress of
configurations.
"""
if not which('quip'):
raise RuntimeError("quip has not been found.\n",
"Please refer to https://github.com/libAtoms/QUIP for ",
"further detail.")
xml_file = 'predict.xml'
original_file = 'original.xyz'
predict_file = 'predict.xyz'
predict_pool = pool_from(test_structures, ref_energies,
ref_forces, ref_stresses)
with ScratchDir('.'):
_ = self.write_param(xml_file)
original_file = self.write_cfgs(original_file, cfg_pool=predict_pool)
_, df_orig = self.read_cfgs(original_file)
exe_command = ["quip"]
exe_command.append("atoms_filename={}".format(original_file))
exe_command.append("param_filename={}".format(xml_file))
if predict_energies:
exe_command.append("energy=T")
if predict_forces:
exe_command.append("forces=T")
if predict_stress:
exe_command.append("virial=T")
p = subprocess.Popen(exe_command, stdout=open(predict_file, 'w'))
stdout = p.communicate()[0]
rc = p.returncode
_, df_predict = self.read_cfgs(predict_file, predict=True)
return df_orig, df_predict
def predict(self, structure):
"""
Predict energy, forces and stresses of the structure.
Args:
structure (Structure): Pymatgen Structure object.
Returns:
energy, forces, stress
"""
calculator = EnergyForceStress(self)
energy, forces, stress = calculator.calculate(structures=[structure])[0]
return energy, forces, stress
def save(self, filename='param.yaml'):
"""
Save parameters of the potential.
Args:
filename (str): The file to store parameters of potential.
Returns:
(str)
"""
with open(filename, 'w') as f:
yaml.dump(self.param, f)
return filename
@staticmethod
def from_file(filename):
"""
Initialize potential with parameters file.
        Args:
filename (str): The file storing parameters of potential.
Returns:
SOAPotential
"""
with open(filename) as f:
param = yaml.load(f)
return SOAPotential(param=param)
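

# A hedged end-to-end sketch of how SOAPotential might be driven. The argument
# names are placeholders for DFT training/test data shaped as documented in
# `train` and `evaluate` above; running this requires QUIP's `teach_sparse`
# and `quip` executables to be on PATH.
def _demo_soap_workflow(train_structures, energies, forces, stresses,
                        test_structures, test_energies, test_forces):
    sp = SOAPotential()
    # Fit the GAP/SOAP model on the training configurations.
    sp.train(train_structures, energies=energies, forces=forces,
             stresses=stresses, use_stress=False)
    # Compare the DFT references against the fitted potential's predictions.
    df_orig, df_predict = sp.evaluate(test_structures,
                                      ref_energies=test_energies,
                                      ref_forces=test_forces,
                                      predict_stress=False)
    sp.save('soap_param.yaml')                  # serialize fitted parameters
    ff_settings = sp.write_param('soap.xml')    # pair_style/pair_coeff for LAMMPS
    return df_orig, df_predict, ff_settings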
|
|
from mathFunctions import *
__all__ = [
"_expandGuideline",
"_compressGuideline",
"_pairGuidelines",
"_processMathOneGuidelines",
"_processMathTwoGuidelines"
]
def _expandGuideline(guideline):
"""
>>> guideline = dict(x=100, y=None, angle=None)
>>> _expandGuideline(guideline)
{'y': 0, 'x': 100, 'angle': 90}
>>> guideline = dict(y=100, x=None, angle=None)
>>> _expandGuideline(guideline)
{'y': 100, 'x': 0, 'angle': 0}
"""
guideline = dict(guideline)
x = guideline.get("x")
y = guideline.get("y")
# horizontal
if x is None:
guideline["x"] = 0
guideline["angle"] = 0
# vertical
elif y is None:
guideline["y"] = 0
guideline["angle"] = 90
return guideline
def _compressGuideline(guideline):
"""
>>> guideline = dict(x=100, y=0, angle=90)
>>> _compressGuideline(guideline)
{'y': None, 'x': 100, 'angle': None}
>>> guideline = dict(x=100, y=0, angle=270)
>>> _compressGuideline(guideline)
{'y': None, 'x': 100, 'angle': None}
>>> guideline = dict(y=100, x=0, angle=0)
>>> _compressGuideline(guideline)
{'y': 100, 'x': None, 'angle': None}
>>> guideline = dict(y=100, x=0, angle=180)
>>> _compressGuideline(guideline)
{'y': 100, 'x': None, 'angle': None}
"""
guideline = dict(guideline)
x = guideline["x"]
y = guideline["y"]
angle = guideline["angle"]
# horizontal
if x == 0 and angle in (0, 180):
guideline["x"] = None
guideline["angle"] = None
# vertical
elif y == 0 and angle in (90, 270):
guideline["y"] = None
guideline["angle"] = None
return guideline
def _pairGuidelines(guidelines1, guidelines2):
"""
name + identifier + (x, y, angle)
>>> guidelines1 = [
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="foo", identifier="2", x=3, y=4, angle=2),
... ]
>>> guidelines2 = [
... dict(name="foo", identifier="2", x=3, y=4, angle=2),
... dict(name="foo", identifier="1", x=1, y=2, angle=1)
... ]
>>> expected = [
... (
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="foo", identifier="1", x=1, y=2, angle=1)
... ),
... (
... dict(name="foo", identifier="2", x=3, y=4, angle=2),
... dict(name="foo", identifier="2", x=3, y=4, angle=2)
... )
... ]
>>> _pairGuidelines(guidelines1, guidelines2) == expected
True
name + identifier
>>> guidelines1 = [
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="foo", identifier="2", x=1, y=2, angle=2),
... ]
>>> guidelines2 = [
... dict(name="foo", identifier="2", x=3, y=4, angle=3),
... dict(name="foo", identifier="1", x=3, y=4, angle=4)
... ]
>>> expected = [
... (
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="foo", identifier="1", x=3, y=4, angle=4)
... ),
... (
... dict(name="foo", identifier="2", x=1, y=2, angle=2),
... dict(name="foo", identifier="2", x=3, y=4, angle=3)
... )
... ]
>>> _pairGuidelines(guidelines1, guidelines2) == expected
True
name + (x, y, angle)
>>> guidelines1 = [
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="foo", identifier="2", x=3, y=4, angle=2),
... ]
>>> guidelines2 = [
... dict(name="foo", identifier="3", x=3, y=4, angle=2),
... dict(name="foo", identifier="4", x=1, y=2, angle=1)
... ]
>>> expected = [
... (
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="foo", identifier="4", x=1, y=2, angle=1)
... ),
... (
... dict(name="foo", identifier="2", x=3, y=4, angle=2),
... dict(name="foo", identifier="3", x=3, y=4, angle=2)
... )
... ]
>>> _pairGuidelines(guidelines1, guidelines2) == expected
True
identifier + (x, y, angle)
>>> guidelines1 = [
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="bar", identifier="2", x=3, y=4, angle=2),
... ]
>>> guidelines2 = [
... dict(name="xxx", identifier="2", x=3, y=4, angle=2),
... dict(name="yyy", identifier="1", x=1, y=2, angle=1)
... ]
>>> expected = [
... (
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="yyy", identifier="1", x=1, y=2, angle=1)
... ),
... (
... dict(name="bar", identifier="2", x=3, y=4, angle=2),
... dict(name="xxx", identifier="2", x=3, y=4, angle=2)
... )
... ]
>>> _pairGuidelines(guidelines1, guidelines2) == expected
True
name
>>> guidelines1 = [
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="bar", identifier="2", x=1, y=2, angle=2),
... ]
>>> guidelines2 = [
... dict(name="bar", identifier="3", x=3, y=4, angle=3),
... dict(name="foo", identifier="4", x=3, y=4, angle=4)
... ]
>>> expected = [
... (
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="foo", identifier="4", x=3, y=4, angle=4)
... ),
... (
... dict(name="bar", identifier="2", x=1, y=2, angle=2),
... dict(name="bar", identifier="3", x=3, y=4, angle=3)
... )
... ]
>>> _pairGuidelines(guidelines1, guidelines2) == expected
True
identifier
>>> guidelines1 = [
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="bar", identifier="2", x=1, y=2, angle=2),
... ]
>>> guidelines2 = [
... dict(name="xxx", identifier="2", x=3, y=4, angle=3),
... dict(name="yyy", identifier="1", x=3, y=4, angle=4)
... ]
>>> expected = [
... (
... dict(name="foo", identifier="1", x=1, y=2, angle=1),
... dict(name="yyy", identifier="1", x=3, y=4, angle=4)
... ),
... (
... dict(name="bar", identifier="2", x=1, y=2, angle=2),
... dict(name="xxx", identifier="2", x=3, y=4, angle=3)
... )
... ]
>>> _pairGuidelines(guidelines1, guidelines2) == expected
True
"""
guidelines1 = list(guidelines1)
guidelines2 = list(guidelines2)
pairs = []
# name + identifier + (x, y, angle)
_findPair(guidelines1, guidelines2, pairs, ("name", "identifier", "x", "y", "angle"))
# name + identifier matches
_findPair(guidelines1, guidelines2, pairs, ("name", "identifier"))
# name + (x, y, angle)
_findPair(guidelines1, guidelines2, pairs, ("name", "x", "y", "angle"))
# identifier + (x, y, angle)
_findPair(guidelines1, guidelines2, pairs, ("identifier", "x", "y", "angle"))
# name matches
if guidelines1 and guidelines2:
_findPair(guidelines1, guidelines2, pairs, ("name",))
# identifier matches
if guidelines1 and guidelines2:
_findPair(guidelines1, guidelines2, pairs, ("identifier",))
# done
return pairs
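
# _findPair (below) pairs guidelines greedily: for each guideline in the first
# list it takes the first guideline in the second list whose values match on
# every attribute in `attrs`, records the pair, and removes the matched
# guidelines from both lists. _pairGuidelines calls it with progressively
# looser attribute sets, so exact matches are claimed before name-only or
# identifier-only matches.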
def _findPair(guidelines1, guidelines2, pairs, attrs):
removeFromGuidelines1 = []
for guideline1 in guidelines1:
match = None
for guideline2 in guidelines2:
attrMatch = False not in [guideline1.get(attr) == guideline2.get(attr) for attr in attrs]
if attrMatch:
match = guideline2
break
if match is not None:
guideline2 = match
removeFromGuidelines1.append(guideline1)
guidelines2.remove(guideline2)
                pairs.append((guideline1, guideline2))
    # Remove the paired guidelines from the first list so that the looser
    # criteria applied later cannot match them a second time.
    for guideline1 in removeFromGuidelines1:
        guidelines1.remove(guideline1)
def _processMathOneGuidelines(guidelinePairs, ptFunc, func):
"""
>>> guidelines = [
... (
... dict(x=1, y=3, angle=5, name="test", identifier="1", color="0,0,0,0"),
... dict(x=6, y=8, angle=10, name=None, identifier=None, color=None)
... )
... ]
>>> expected = [
... dict(x=7, y=11, angle=15, name="test", identifier="1", color="0,0,0,0")
... ]
>>> _processMathOneGuidelines(guidelines, addPt, add) == expected
True
"""
result = []
for guideline1, guideline2 in guidelinePairs:
guideline = dict(guideline1)
pt1 = (guideline1["x"], guideline1["y"])
pt2 = (guideline2["x"], guideline2["y"])
guideline["x"], guideline["y"] = ptFunc(pt1, pt2)
angle1 = guideline1["angle"]
angle2 = guideline2["angle"]
guideline["angle"] = func(angle1, angle2)
result.append(guideline)
return result
def _processMathTwoGuidelines(guidelines, factor, func):
"""
>>> guidelines = [
... dict(x=2, y=3, angle=5, name="test", identifier="1", color="0,0,0,0")
... ]
>>> expected = [
... dict(x=4, y=4.5, angle=3.75, name="test", identifier="1", color="0,0,0,0")
... ]
>>> result = _processMathTwoGuidelines(guidelines, (2, 1.5), mul)
>>> result[0]["angle"] = round(result[0]["angle"], 2)
>>> result == expected
True
"""
result = []
for guideline in guidelines:
guideline = dict(guideline)
guideline["x"] = func(guideline["x"], factor[0])
guideline["y"] = func(guideline["y"], factor[1])
angle = guideline["angle"]
guideline["angle"] = factorAngle(angle, factor, func)
result.append(guideline)
return result
def _roundGuidelines(guidelines, digits=None):
"""
>>> guidelines = [
... dict(x=1.99, y=3.01, angle=5, name="test", identifier="1", color="0,0,0,0")
... ]
>>> expected = [
... dict(x=2, y=3, angle=5, name="test", identifier="1", color="0,0,0,0")
... ]
>>> result = _roundGuidelines(guidelines)
>>> result == expected
True
"""
results = []
for guideline in guidelines:
guideline = dict(guideline)
guideline['x'] = _roundNumber(guideline['x'], digits)
guideline['y'] = _roundNumber(guideline['y'], digits)
results.append(guideline)
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
"""Test event helpers."""
# pylint: disable=protected-access
import asyncio
import unittest
from datetime import datetime, timedelta
from astral import Astral
from homeassistant.bootstrap import setup_component
import homeassistant.core as ha
from homeassistant.const import MATCH_ALL
from homeassistant.helpers.event import (
track_point_in_utc_time,
track_point_in_time,
track_utc_time_change,
track_time_change,
track_state_change,
track_time_interval,
track_sunrise,
track_sunset,
)
from homeassistant.components import sun
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
class TestEventHelpers(unittest.TestCase):
"""Test the Home Assistant event helpers."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_track_point_in_time(self):
"""Test track point in time."""
before_birthday = datetime(1985, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
birthday_paulus = datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
after_birthday = datetime(1987, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
runs = []
track_point_in_utc_time(
self.hass, lambda x: runs.append(1), birthday_paulus)
self._send_time_changed(before_birthday)
self.hass.block_till_done()
self.assertEqual(0, len(runs))
self._send_time_changed(birthday_paulus)
self.hass.block_till_done()
self.assertEqual(1, len(runs))
# A point in time tracker will only fire once, this should do nothing
self._send_time_changed(birthday_paulus)
self.hass.block_till_done()
self.assertEqual(1, len(runs))
track_point_in_time(
self.hass, lambda x: runs.append(1), birthday_paulus)
self._send_time_changed(after_birthday)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
unsub = track_point_in_time(
self.hass, lambda x: runs.append(1), birthday_paulus)
unsub()
self._send_time_changed(after_birthday)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
def test_track_time_change(self):
"""Test tracking time change."""
wildcard_runs = []
specific_runs = []
unsub = track_time_change(self.hass, lambda x: wildcard_runs.append(1))
unsub_utc = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), second=[0, 30])
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(1, len(wildcard_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 15))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(2, len(wildcard_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 30))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
self.assertEqual(3, len(wildcard_runs))
unsub()
unsub_utc()
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 30))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
self.assertEqual(3, len(wildcard_runs))
def test_track_state_change(self):
"""Test track_state_change."""
# 2 lists to track how often our callbacks get called
specific_runs = []
wildcard_runs = []
wildercard_runs = []
def specific_run_callback(entity_id, old_state, new_state):
specific_runs.append(1)
track_state_change(
self.hass, 'light.Bowl', specific_run_callback, 'on', 'off')
@ha.callback
def wildcard_run_callback(entity_id, old_state, new_state):
wildcard_runs.append((old_state, new_state))
track_state_change(self.hass, 'light.Bowl', wildcard_run_callback)
@asyncio.coroutine
def wildercard_run_callback(entity_id, old_state, new_state):
wildercard_runs.append((old_state, new_state))
track_state_change(self.hass, MATCH_ALL, wildercard_run_callback)
# Adding state to state machine
self.hass.states.set("light.Bowl", "on")
self.hass.block_till_done()
self.assertEqual(0, len(specific_runs))
self.assertEqual(1, len(wildcard_runs))
self.assertEqual(1, len(wildercard_runs))
self.assertIsNone(wildcard_runs[-1][0])
self.assertIsNotNone(wildcard_runs[-1][1])
# Set same state should not trigger a state change/listener
self.hass.states.set('light.Bowl', 'on')
self.hass.block_till_done()
self.assertEqual(0, len(specific_runs))
self.assertEqual(1, len(wildcard_runs))
self.assertEqual(1, len(wildercard_runs))
# State change off -> on
self.hass.states.set('light.Bowl', 'off')
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(2, len(wildcard_runs))
self.assertEqual(2, len(wildercard_runs))
# State change off -> off
self.hass.states.set('light.Bowl', 'off', {"some_attr": 1})
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(3, len(wildcard_runs))
self.assertEqual(3, len(wildercard_runs))
# State change off -> on
self.hass.states.set('light.Bowl', 'on')
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(4, len(wildcard_runs))
self.assertEqual(4, len(wildercard_runs))
self.hass.states.remove('light.bowl')
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(5, len(wildcard_runs))
self.assertEqual(5, len(wildercard_runs))
self.assertIsNotNone(wildcard_runs[-1][0])
self.assertIsNone(wildcard_runs[-1][1])
self.assertIsNotNone(wildercard_runs[-1][0])
self.assertIsNone(wildercard_runs[-1][1])
# Set state for different entity id
self.hass.states.set('switch.kitchen', 'on')
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(5, len(wildcard_runs))
self.assertEqual(6, len(wildercard_runs))
def test_track_time_interval(self):
"""Test tracking time interval."""
specific_runs = []
utc_now = dt_util.utcnow()
unsub = track_time_interval(
self.hass, lambda x: specific_runs.append(1),
timedelta(seconds=10)
)
self._send_time_changed(utc_now + timedelta(seconds=5))
self.hass.block_till_done()
self.assertEqual(0, len(specific_runs))
self._send_time_changed(utc_now + timedelta(seconds=13))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(utc_now + timedelta(minutes=20))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
unsub()
self._send_time_changed(utc_now + timedelta(seconds=30))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_track_sunrise(self):
"""Test track the sunrise."""
latitude = 32.87336
longitude = 117.22743
# Setup sun component
self.hass.config.latitude = latitude
self.hass.config.longitude = longitude
setup_component(self.hass, sun.DOMAIN, {
sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
# Get next sunrise/sunset
astral = Astral()
utc_now = dt_util.utcnow()
mod = -1
while True:
next_rising = (astral.sunrise_utc(utc_now +
timedelta(days=mod), latitude, longitude))
if next_rising > utc_now:
break
mod += 1
# Track sunrise
runs = []
unsub = track_sunrise(self.hass, lambda: runs.append(1))
offset_runs = []
offset = timedelta(minutes=30)
unsub2 = track_sunrise(self.hass, lambda: offset_runs.append(1),
offset)
# run tests
self._send_time_changed(next_rising - offset)
self.hass.block_till_done()
self.assertEqual(0, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_rising)
self.hass.block_till_done()
self.assertEqual(1, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_rising + offset)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
unsub()
unsub2()
self._send_time_changed(next_rising + offset)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
def test_track_sunset(self):
"""Test track the sunset."""
latitude = 32.87336
longitude = 117.22743
# Setup sun component
self.hass.config.latitude = latitude
self.hass.config.longitude = longitude
setup_component(self.hass, sun.DOMAIN, {
sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
# Get next sunrise/sunset
astral = Astral()
utc_now = dt_util.utcnow()
mod = -1
while True:
next_setting = (astral.sunset_utc(utc_now +
timedelta(days=mod), latitude, longitude))
if next_setting > utc_now:
break
mod += 1
# Track sunset
runs = []
unsub = track_sunset(self.hass, lambda: runs.append(1))
offset_runs = []
offset = timedelta(minutes=30)
unsub2 = track_sunset(self.hass, lambda: offset_runs.append(1), offset)
# Run tests
self._send_time_changed(next_setting - offset)
self.hass.block_till_done()
self.assertEqual(0, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_setting)
self.hass.block_till_done()
self.assertEqual(1, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_setting + offset)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
unsub()
unsub2()
self._send_time_changed(next_setting + offset)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
def _send_time_changed(self, now):
"""Send a time changed event."""
self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: now})
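    # Note: firing EVENT_TIME_CHANGED by hand (rather than waiting for real
    # time to pass) is what lets these tests drive the time-based helpers
    # deterministically.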
def test_periodic_task_minute(self):
"""Test periodic tasks per minute."""
specific_runs = []
unsub = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), minute='/5')
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 3, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 5, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
unsub()
self._send_time_changed(datetime(2014, 5, 24, 12, 5, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_periodic_task_hour(self):
"""Test periodic tasks per hour."""
specific_runs = []
unsub = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), hour='/2')
self._send_time_changed(datetime(2014, 5, 24, 22, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 23, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 25, 1, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 25, 2, 0, 0))
self.hass.block_till_done()
self.assertEqual(3, len(specific_runs))
unsub()
self._send_time_changed(datetime(2014, 5, 25, 2, 0, 0))
self.hass.block_till_done()
self.assertEqual(3, len(specific_runs))
def test_periodic_task_day(self):
"""Test periodic tasks per day."""
specific_runs = []
unsub = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), day='/2')
self._send_time_changed(datetime(2014, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 3, 12, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 4, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
unsub()
self._send_time_changed(datetime(2014, 5, 4, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_periodic_task_year(self):
"""Test periodic tasks per year."""
specific_runs = []
unsub = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), year='/2')
self._send_time_changed(datetime(2014, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2015, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2016, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
unsub()
self._send_time_changed(datetime(2016, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_periodic_task_wrong_input(self):
"""Test periodic tasks with wrong input."""
specific_runs = []
track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), year='/two')
self._send_time_changed(datetime(2014, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(0, len(specific_runs))
|
|
# Lint as: python3
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calibrates the approximated Heston model parameters using option prices."""
from typing import Callable, Tuple
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tf_quant_finance import types
from tf_quant_finance import utils
from tf_quant_finance.math import make_val_and_grad_fn
from tf_quant_finance.math import optimizer
from tf_quant_finance.models.heston import approximations
__all__ = [
'CalibrationResult',
'calibration',
]
@utils.dataclass
class CalibrationResult:
"""Collection of calibrated Heston parameters.
For a review of the Heston model and the conventions used, please see the
docstring for `HestonModel`, or for `calibration` below.
Attributes:
    initial_variance: Rank-1 `Tensor` specifying the initial variance levels.
mean_reversion: Rank-1 `Tensor` specifying the mean reversion rate.
theta: Rank-1 `Tensor` specifying the long run price variance.
volvol: Rank-1 `Tensor` specifying the vol-vol parameters.
rho: Rank-1 `Tensor` specifying the correlations between the forward and
the stochastic volatility.
"""
initial_variance: types.RealTensor
mean_reversion: types.RealTensor
theta: types.RealTensor
volvol: types.RealTensor
rho: types.RealTensor
def calibration(
*,
prices: types.RealTensor,
strikes: types.RealTensor,
expiries: types.RealTensor,
spots: types.RealTensor = None,
forwards: types.RealTensor = None,
is_call_options: types.BoolTensor,
discount_rates: types.RealTensor = None,
dividend_rates: types.RealTensor = None,
discount_factors: types.RealTensor = None,
mean_reversion: types.RealTensor,
initial_variance: types.RealTensor,
theta: types.RealTensor,
volvol: types.RealTensor,
rho: types.RealTensor,
mean_reversion_lower_bound: types.RealTensor = 0.001,
mean_reversion_upper_bound: types.RealTensor = 0.5,
initial_variance_lower_bound: types.RealTensor = 0.0,
initial_variance_upper_bound: types.RealTensor = 1.0,
theta_lower_bound: types.RealTensor = 0.0,
theta_upper_bound: types.RealTensor = 1.0,
volvol_lower_bound: types.RealTensor = 0.0,
volvol_upper_bound: types.RealTensor = 1.0,
rho_lower_bound: types.RealTensor = -1.0,
rho_upper_bound: types.RealTensor = 1.0,
optimizer_fn: Callable[..., types.RealTensor] = None,
tolerance: types.RealTensor = 1e-6,
maximum_iterations: types.RealTensor = 100,
validate_args: bool = False,
dtype: tf.DType = None,
name: str = None
) -> Tuple[CalibrationResult, types.BoolTensor, types.IntTensor]:
"""Calibrates the Heston model using European option prices.
Represents the Ito process:
```None
dX(t) = -V(t) / 2 * dt + sqrt(V(t)) * dW_{X}(t),
dV(t) = mean_reversion(t) * (theta(t) - V(t)) * dt
+ volvol(t) * sqrt(V(t)) * dW_{V}(t)
```
where `W_{X}` and `W_{V}` are 1D Brownian motions with a correlation `rho(t)`.
`mean_reversion`, `theta`, `volvol`, and `rho` are positive piecewise constant
functions of time. Here `V(t)` represents the process variance at time `t` and
`X` represents logarithm of the spot price at time `t`.
`mean_reversion` corresponds to the mean reversion rate, `theta` is the long
run price variance, and `volvol` is the volatility of the volatility.
#### Example
```python
  import numpy as np
  import tf_quant_finance as tff
  import tensorflow.compat.v2 as tf
dtype = np.float64
# Set some market conditions.
observed_prices = np.array(
[[29.33668202, 23.98724723, 19.54631658, 15.9022847, 12.93591534],
[15.64785924, 21.05865247, 27.11907971, 33.74249536, 40.8485591]],
dtype=dtype)
strikes = np.array(
[[80.0, 90.0, 100.0, 110.0, 120.0], [80.0, 90.0, 100.0, 110.0, 120.0]],
dtype=dtype)
expiries = np.array([[0.5], [1.0]], dtype=dtype)
forwards = 100.0
is_call_options = np.array([[True], [False]])
# Calibrate the model.
# In this example, we are calibrating a Heston model.
models, is_converged, _ = tff.models.heston.calibration(
prices=observed_prices,
strikes=strikes,
expiries=expiries,
forwards=forwards,
is_call_options=is_call_options,
mean_reversion=np.array([0.3], dtype=dtype),
initial_variance=np.array([0.8], dtype=dtype),
theta=np.array([0.75], dtype=dtype),
volvol=np.array([0.1], dtype=dtype),
rho=np.array(0.0, dtype=dtype),
optimizer_fn=tff.math.optimizer.bfgs_minimize,
maximum_iterations=1000)
  # This will return the calibrated parameters for two Heston models, where:
# Model 1 has mean_reversion = 0.3, initial_variance = 0.473, volvol = 0.1,
# theta = 0.724 and rho = 0.028
# Model 2 has mean_reversion = 0.3, initial_variance = 0.45, volvol = 0.1,
# theta = 0.691 and rho = -0.073
```
Args:
prices: Real `Tensor` of shape [batch_size, num_strikes] specifying the
observed options prices. Here, `batch_size` refers to the number of Heston
models calibrated in this invocation.
strikes: Real `Tensor` of shape [batch_size, num_strikes] specifying the
strike prices of the options.
expiries: Real `Tensor` of shape compatible with [batch_size, num_strikes]
specifying the options expiries.
    spots: A real `Tensor` of any shape that broadcasts to the shape of
      `strikes`. The current spot price of the underlying. Either this
argument or the `forwards` (but not both) must be supplied.
forwards: A real `Tensor` of any shape that broadcasts to the shape of
`strikes`. The forwards to maturity. Either this argument or the
`spots` must be supplied but both must not be supplied.
is_call_options: A boolean `Tensor` of shape compatible with
[batch_size, num_strikes] specifying whether or not the prices correspond
to a call option (=True) or a put option (=False).
discount_rates: An optional real `Tensor` of same dtype as the
`strikes` and of the shape that broadcasts with `strikes`.
If not `None`, discount factors are calculated as e^(-rT),
where r are the discount rates, or risk free rates. At most one of
discount_rates and discount_factors can be supplied.
Default value: `None`, equivalent to r = 0 and discount factors = 1 when
discount_factors also not given.
dividend_rates: An optional real `Tensor` of same dtype as the
      `strikes` and of a shape that broadcasts with `strikes`.
Default value: `None`, equivalent to q = 0.
discount_factors: An optional real `Tensor` of same dtype as the
`strikes`. If not `None`, these are the discount factors to expiry
(i.e. e^(-rT)). Mutually exclusive with `discount_rates`. If neither is
given, no discounting is applied (i.e. the undiscounted option price is
returned). If `spots` is supplied and `discount_factors` is not `None`
then this is also used to compute the forwards to expiry. At most one of
`discount_rates` and `discount_factors` can be supplied.
Default value: `None`, which maps to e^(-rT) calculated from
discount_rates.
mean_reversion: Real `Tensor` of shape [batch_size], specifying the initial
estimate of the mean reversion parameter.
initial_variance: Real `Tensor` of shape [batch_size], specifying the
initial estimate of the variance parameter.
Values must satisfy `0 <= initial_variance`.
theta: Real `Tensor` of shape [batch_size], specifying the initial estimate
of the long run variance parameter. Values must satisfy `0 <= theta`.
volvol: Real `Tensor` of shape [batch_size], specifying the initial estimate
of the vol-vol parameter. Values must satisfy `0 <= volvol`.
rho: Real `Tensor` of shape [batch_size], specifying the initial estimate of
the correlation between the forward price and the volatility. Values must
satisfy -1 < `rho` < 1.
mean_reversion_lower_bound: Real `Tensor` compatible with that of
`mean_reversion`, specifying the lower bound for the calibrated value.
Default value: 0.001.
mean_reversion_upper_bound: Real `Tensor` compatible with that of
      `mean_reversion`, specifying the upper bound for the calibrated value.
Default value: 0.5.
initial_variance_lower_bound: Real `Tensor` compatible with that of
`initial_variance`, specifying the lower bound for the calibrated value.
Default value: 0.0.
initial_variance_upper_bound: Real `Tensor` compatible with that of
      `initial_variance`, specifying the upper bound for the calibrated value.
Default value: 1.0.
theta_lower_bound: Real `Tensor` compatible with that of `theta`,
specifying the lower bound for the calibrated value.
Default value: 0.0.
theta_upper_bound: Real `Tensor` compatible with that of `theta`,
      specifying the upper bound for the calibrated value.
Default value: 1.0.
volvol_lower_bound: Real `Tensor` compatible with that of `volvol`,
specifying the lower bound for the calibrated value.
Default value: 0.0.
volvol_upper_bound: Real `Tensor` compatible with that of `volvol`,
      specifying the upper bound for the calibrated value.
Default value: 1.0.
rho_lower_bound: Real `Tensor` compatible with that of `rho`, specifying the
lower bound for the calibrated value.
Default value: -1.0.
rho_upper_bound: Real `Tensor` compatible with that of `rho`, specifying the
upper bound for the calibrated value.
Default value: 1.0.
optimizer_fn: Optional Python callable which implements the algorithm used
to minimize the objective function during calibration. It should have
the following interface: result =
optimizer_fn(value_and_gradients_function, initial_position, tolerance,
max_iterations) `value_and_gradients_function` is a Python callable that
accepts a point as a real `Tensor` and returns a tuple of `Tensor`s of
real dtype containing the value of the function and its gradient at that
point. 'initial_position' is a real `Tensor` containing the starting
point of the optimization, 'tolerance' is a real scalar `Tensor` for
stopping tolerance for the procedure and `max_iterations` specifies the
maximum number of iterations.
`optimizer_fn` should return a namedtuple containing the items: `position`
(a tensor containing the optimal value), `converged` (a boolean
indicating whether the optimize converged according the specified
criteria), `failed` (a boolean indicating if the optimization resulted
in a failure), `num_iterations` (the number of iterations used), and
`objective_value` ( the value of the objective function at the optimal
value). The default value for `optimizer_fn` is None and conjugate
gradient algorithm is used.
Default value: `None` - indicating LBFGS minimizer.
tolerance: Scalar `Tensor` of real dtype. The absolute tolerance for
terminating the iterations.
Default value: 1e-6.
maximum_iterations: Scalar positive integer `Tensor`. The maximum number of
iterations during the optimization.
Default value: 100.
validate_args: Boolean value indicating whether or not to validate the shape
and values of the input arguments, at the potential expense of performance
degredation.
Default value: False.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None`, which means that default dtypes inferred by
TensorFlow are used.
name: String. The name to give to the ops created by this function.
Default value: `None`, which maps to the default name 'heston_calibration'
Returns:
A Tuple of three elements:
    * The first is a `CalibrationResult` holding the calibrated mean reversion,
      initial variance, theta, volvol, and rho, where `mean_reversion[i]`
      corresponds to the calibrated `mean_reversion` of the i-th batch, etc.
    * A `Tensor` of optimization status for each batch element (whether the
      optimization algorithm has found the optimal point based on the specified
      convergence criteria).
* A `Tensor` containing the number of iterations performed by the
optimization algorithm.
"""
if (spots is None) == (forwards is None):
raise ValueError('Either spots or forwards must be supplied but not both.')
if (discount_rates is not None) and (discount_factors is not None):
raise ValueError('At most one of discount_rates and discount_factors may '
'be supplied')
name = name or 'heston_calibration'
with tf.name_scope(name):
prices = tf.convert_to_tensor(prices, dtype=dtype, name='prices')
dtype = dtype or prices.dtype
# Extract batch shape
batch_shape = prices.shape.as_list()[:-1]
if None in batch_shape:
batch_shape = tff.utils.get_shape(prices)[:-1]
strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')
expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')
if discount_factors is not None:
discount_factors = tf.convert_to_tensor(
discount_factors, dtype=dtype, name='discount_factors')
if discount_rates is not None:
discount_rates = tf.convert_to_tensor(
discount_rates, dtype=dtype, name='discount_rates')
elif discount_factors is not None:
discount_rates = -tf.math.log(discount_factors) / expiries
else:
discount_rates = tf.convert_to_tensor(
0.0, dtype=dtype, name='discount_rates')
if dividend_rates is None:
dividend_rates = 0.0
dividend_rates = tf.convert_to_tensor(
dividend_rates, dtype=dtype, name='dividend_rates')
if forwards is not None:
forwards = tf.convert_to_tensor(forwards, dtype=dtype, name='forwards')
else:
spots = tf.convert_to_tensor(spots, dtype=dtype, name='spots')
cost_of_carries = discount_rates - dividend_rates
forwards = spots * tf.exp(cost_of_carries * expiries)
is_call_options = tf.convert_to_tensor(
is_call_options, name='is_call', dtype=tf.bool)
if optimizer_fn is None:
optimizer_fn = optimizer.lbfgs_minimize
# Pre-processing mean_reversion
mean_reversion = tf.convert_to_tensor(mean_reversion, dtype=dtype)
mean_reversion_lower_bound = tf.convert_to_tensor(
mean_reversion_lower_bound, dtype=dtype)
mean_reversion_upper_bound = tf.convert_to_tensor(
mean_reversion_upper_bound, dtype=dtype)
mean_reversion = _assert_parameter_valid(
validate_args,
mean_reversion,
lower_bound=mean_reversion_lower_bound,
upper_bound=mean_reversion_upper_bound,
message='`mean_reversion` is invalid!')
# Broadcast mean_reversion to the correct batch shape
mean_reversion = tf.broadcast_to(mean_reversion, batch_shape,
name='broadcast_mean_reversion')
initial_mean_reversion = _to_unconstrained(
mean_reversion, mean_reversion_lower_bound, mean_reversion_upper_bound)
# Pre-processing theta
theta = tf.convert_to_tensor(theta, dtype=dtype)
theta_lower_bound = tf.convert_to_tensor(theta_lower_bound, dtype=dtype)
theta_upper_bound = tf.convert_to_tensor(theta_upper_bound, dtype=dtype)
theta = _assert_parameter_valid(
validate_args,
theta,
lower_bound=theta_lower_bound,
upper_bound=theta_upper_bound,
message='`theta` is invalid!')
# Broadcast theta to the correct batch shape
theta = tf.broadcast_to(theta, batch_shape, name='broadcast_theta')
initial_theta = _to_unconstrained(theta, theta_lower_bound,
theta_upper_bound)
# Pre-processing volvol
volvol_lower_bound = tf.convert_to_tensor(volvol_lower_bound, dtype=dtype)
volvol_upper_bound = tf.convert_to_tensor(volvol_upper_bound, dtype=dtype)
volvol = _assert_parameter_valid(
validate_args,
volvol,
lower_bound=volvol_lower_bound,
upper_bound=volvol_upper_bound,
message='`volvol` is invalid!')
# Broadcast volvol to the correct batch shape
volvol = tf.broadcast_to(volvol, batch_shape, name='broadcast_volvol')
initial_volvol = _to_unconstrained(volvol, volvol_lower_bound,
volvol_upper_bound)
initial_variance_lower_bound = tf.convert_to_tensor(
initial_variance_lower_bound, dtype=dtype)
initial_variance_upper_bound = tf.convert_to_tensor(
initial_variance_upper_bound, dtype=dtype)
initial_variance = _assert_parameter_valid(
validate_args, initial_variance,
lower_bound=initial_variance_lower_bound,
upper_bound=initial_variance_upper_bound,
message='`initial_variance` is invalid!')
# Broadcast initial_variance to the correct batch shape
initial_variance = tf.broadcast_to(initial_variance, batch_shape,
name='broadcast_initial_variance')
initial_variance_unconstrained = _to_unconstrained(
initial_variance, initial_variance_lower_bound,
initial_variance_upper_bound)
# Pre-processing rho
rho_lower_bound = tf.convert_to_tensor(rho_lower_bound, dtype=dtype)
rho_upper_bound = tf.convert_to_tensor(rho_upper_bound, dtype=dtype)
rho = _assert_parameter_valid(
validate_args,
rho,
lower_bound=rho_lower_bound,
upper_bound=rho_upper_bound,
message='`rho` is invalid!')
# Broadcast rho to the correct batch shape
rho = tf.broadcast_to(rho, batch_shape, name='broadcast_rho')
initial_rho = _to_unconstrained(rho, rho_lower_bound, rho_upper_bound)
# Construct initial state for the optimizer
# shape[batch_size, 5]
initial_x = tf.stack(
[initial_variance_unconstrained, initial_theta, initial_volvol,
initial_mean_reversion, initial_rho], axis=-1)
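    # Note: the packing order here ([initial_variance, theta, volvol,
    # mean_reversion, rho]) must stay in sync with the index-based unpacking
    # in `_OptimizerArgHandler` (slots 0, 1, 2, 3 and -1 respectively).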
optimizer_arg_handler = _OptimizerArgHandler(
volvol_lower_bound=volvol_lower_bound,
volvol_upper_bound=volvol_upper_bound,
rho_lower_bound=rho_lower_bound,
rho_upper_bound=rho_upper_bound,
mean_reversion_lower_bound=mean_reversion_lower_bound,
mean_reversion_upper_bound=mean_reversion_upper_bound,
theta_lower_bound=theta_lower_bound,
theta_upper_bound=theta_upper_bound,
initial_variance_lower_bound=initial_variance_lower_bound,
initial_variance_upper_bound=initial_variance_upper_bound)
loss_function = _get_loss_for_price_based_calibration(
prices=prices,
strikes=strikes,
expiries=expiries,
forwards=forwards,
is_call_options=is_call_options,
dtype=dtype,
optimizer_arg_handler=optimizer_arg_handler)
optimization_result = optimizer_fn(
loss_function,
initial_position=initial_x,
tolerance=tolerance,
max_iterations=maximum_iterations)
calibration_parameters = optimization_result.position
calibrated_theta = optimizer_arg_handler.get_theta(calibration_parameters)
calibrated_volvol = optimizer_arg_handler.get_volvol(calibration_parameters)
calibrated_rho = optimizer_arg_handler.get_rho(calibration_parameters)
calibrated_initial_variance = optimizer_arg_handler.get_initial_variance(
calibration_parameters)
calibrated_mean_reversion = optimizer_arg_handler.get_mean_reversion(
calibration_parameters)
return (CalibrationResult(mean_reversion=calibrated_mean_reversion,
volvol=calibrated_volvol,
rho=calibrated_rho,
theta=calibrated_theta,
initial_variance=calibrated_initial_variance),
optimization_result.converged,
optimization_result.num_iterations)
def _get_loss_for_price_based_calibration(
*, prices, strikes, expiries, forwards, is_call_options,
optimizer_arg_handler, dtype):
"""Creates a loss function to be used in volatility-based calibration."""
def _price_transform(x):
return tf.math.log1p(x)
scaled_target_values = _price_transform(prices)
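  # The loss below is computed on log1p-transformed prices rather than raw
  # prices, so large and small option premia contribute on a comparable scale.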
@make_val_and_grad_fn
def loss_function(x):
"""Loss function for the price-based optimization."""
candidate_initial_variance = optimizer_arg_handler.get_initial_variance(x)
candidate_theta = optimizer_arg_handler.get_theta(x)
candidate_volvol = optimizer_arg_handler.get_volvol(x)
candidate_rho = optimizer_arg_handler.get_rho(x)
candidate_mean_reversion = optimizer_arg_handler.get_mean_reversion(x)
values = approximations.european_option_price(
strikes=strikes,
expiries=expiries,
forwards=forwards,
is_call_options=is_call_options,
variances=tf.expand_dims(candidate_initial_variance, axis=-1),
mean_reversion=tf.expand_dims(candidate_mean_reversion, axis=-1),
volvol=tf.expand_dims(candidate_volvol, axis=-1),
rho=tf.expand_dims(candidate_rho, axis=-1),
theta=tf.expand_dims(candidate_theta, axis=-1),
dtype=dtype)
scaled_values = _price_transform(values)
return tf.math.reduce_mean(
(scaled_values - scaled_target_values)**2, axis=-1)
return loss_function
@utils.dataclass
class _OptimizerArgHandler:
"""Handles the packing/transformation of estimated parameters."""
theta_lower_bound: types.RealTensor
theta_upper_bound: types.RealTensor
volvol_lower_bound: types.RealTensor
volvol_upper_bound: types.RealTensor
rho_lower_bound: types.RealTensor
rho_upper_bound: types.RealTensor
mean_reversion_lower_bound: types.RealTensor
mean_reversion_upper_bound: types.RealTensor
initial_variance_lower_bound: types.RealTensor
initial_variance_upper_bound: types.RealTensor
def get_initial_variance(
self, packed_optimizer_args: types.RealTensor) -> types.RealTensor:
"""Unpack and return the rho parameter."""
initial_variance = packed_optimizer_args[..., 0]
return _to_constrained(
initial_variance,
self.initial_variance_lower_bound, self.initial_variance_upper_bound)
def get_theta(self,
packed_optimizer_args: types.RealTensor) -> types.RealTensor:
"""Unpack and return the volvol parameter."""
theta = packed_optimizer_args[..., 1]
return _to_constrained(
theta,
self.theta_lower_bound, self.theta_upper_bound)
def get_volvol(self,
packed_optimizer_args: types.RealTensor) -> types.RealTensor:
"""Unpack and return the volvol parameter."""
volvol = packed_optimizer_args[..., 2]
return _to_constrained(
volvol,
self.volvol_lower_bound, self.volvol_upper_bound)
def get_mean_reversion(
self, packed_optimizer_args: types.RealTensor) -> types.RealTensor:
"""Unpack and return the mean_reversion parameter."""
mean_reversion = packed_optimizer_args[..., 3]
return _to_constrained(mean_reversion, self.mean_reversion_lower_bound,
self.mean_reversion_upper_bound)
def get_rho(self,
packed_optimizer_args: types.RealTensor) -> types.RealTensor:
"""Unpack and return the rho parameter."""
rho = packed_optimizer_args[..., -1]
return _to_constrained(
rho,
self.rho_lower_bound, self.rho_upper_bound)
def _scale(x, lb, ub):
"""Scales the values to be normalized to [lb, ub]."""
return (x - lb) / (ub - lb)
def _to_unconstrained(x, lb, ub):
"""Scale and apply inverse-sigmoid."""
x = _scale(x, lb, ub)
return -tf.math.log((1.0 - x) / x)
def _to_constrained(x, lb, ub):
"""Sigmoid and unscale."""
x = 1.0 / (1.0 + tf.math.exp(-x))
return x * (ub - lb) + lb
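# The two helpers above form a sigmoid-based bijection between the box
# [lb, ub] and the real line, letting the optimizer search an unconstrained
# space. A quick sanity sketch (illustrative, not part of the original module):
#   y = _to_unconstrained(x, lb, ub)   # maps values in (lb, ub) to (-inf, inf)
#   _to_constrained(y, lb, ub)         # recovers x (up to numerical error)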
def _assert_parameter_valid(validate_args, x, lower_bound, upper_bound,
message):
"""Helper to check that the input parameter is valid."""
if validate_args:
with tf.control_dependencies([
tf.debugging.assert_greater_equal(x, lower_bound, message=message),
tf.debugging.assert_less_equal(x, upper_bound, message=message),
]):
return tf.identity(x)
else:
return x
|
|
#!/usr/bin/env python
# standard library imports
import os
import sys
import logging
import re
import hashlib
# 3rd party imports
import simplejson
# KBase imports
import biokbase.Transform.script_utils as script_utils
# transformation method that can be called if this module is imported
# Note that the logger can be run at different levels.
# See: https://docs.python.org/2/library/logging.html#logging-levels
#
# The default level is set to INFO which includes everything except DEBUG
def transform(shock_service_url=None, handle_service_url=None,
output_file_name=None, input_directory=None,
working_directory=None, shock_id=None, handle_id=None,
input_mapping=None, fasta_reference_only=False,
level=logging.INFO, logger=None):
"""
Converts FASTA file to KBaseGenomes.ContigSet json string.
Note the MD5 for the contig is generated by uppercasing the sequence.
The ContigSet MD5 is generated by taking the MD5 of joining the sorted
list of individual contig's MD5s with a comma separator.
Args:
shock_service_url: A url for the KBase SHOCK service.
handle_service_url: A url for the KBase Handle Service.
output_file_name: A file name where the output JSON string should be stored.
If the output file name is not specified the name will default
to the name of the input file appended with '_contig_set'
    input_directory: The directory containing the input FASTA file(s).
working_directory: The directory the resulting json file will be written to.
shock_id: Shock id for the fasta file if it already exists in shock
handle_id: Handle id for the fasta file if it already exists as a handle
input_mapping: JSON string mapping of input files to expected types.
                   If this is not provided, the directory is scanned for
                   FASTA files instead.
    fasta_reference_only: Creates a reference to the FASTA file in Shock, but
                          does not store the sequences in the workspace object.
                          Not recommended unless the FASTA file is larger than
                          1GB; this is the default behavior for files that large.
level: Logging level, defaults to logging.INFO.
Returns:
JSON file on disk that can be saved as a KBase workspace object.
Authors:
Jason Baumohl, Matt Henderson
"""
if logger is None:
logger = script_utils.stderrlogger(__file__)
logger.info("Starting conversion of FASTA to KBaseGenomes.ContigSet")
token = os.environ.get('KB_AUTH_TOKEN')
if input_mapping is None:
logger.info("Scanning for FASTA files.")
valid_extensions = [".fa",".fasta",".fna"]
files = os.listdir(working_directory)
fasta_files = [x for x in files if os.path.splitext(x)[-1] in valid_extensions]
assert len(fasta_files) != 0
logger.info("Found {0}".format(str(fasta_files)))
        input_file_name = os.path.join(working_directory, fasta_files[0])
if len(fasta_files) > 1:
logger.warning("Not sure how to handle multiple FASTA files in this context. Using {0}".format(input_file_name))
else:
input_file_name = os.path.join(os.path.join(input_directory, "FASTA.DNA.Assembly"), simplejson.loads(input_mapping)["FASTA.DNA.Assembly"])
logger.info("Building Object.")
if not os.path.isfile(input_file_name):
raise Exception("The input file name {0} is not a file!".format(input_file_name))
    if not os.path.isdir(working_directory):
raise Exception("The working directory {0} is not a valid directory!".format(working_directory))
# default if not too large
contig_set_has_sequences = True
if fasta_reference_only:
contig_set_has_sequences = False
fasta_filesize = os.stat(input_file_name).st_size
if fasta_filesize > 1000000000:
# Fasta file too large to save sequences into the ContigSet object.
contigset_warn = """The FASTA input file seems to be too large. A ContigSet
object will be created without sequences, but will
contain a reference to the file."""
logger.warning(contigset_warn)
contig_set_has_sequences = False
input_file_handle = open(input_file_name, 'r')
fasta_header = None
sequence_list = []
fasta_dict = dict()
first_header_found = False
contig_set_md5_list = []
# Pattern for replacing white space
pattern = re.compile(r'\s+')
sequence_exists = False
for current_line in input_file_handle:
if (current_line[0] == ">"):
# found a header line
# Wrap up previous fasta sequence
if (not sequence_exists) and first_header_found:
logger.error("There is no sequence related to FASTA record : {0}".format(fasta_header))
raise Exception("There is no sequence related to FASTA record : {0}".format(fasta_header))
if not first_header_found:
first_header_found = True
else:
# build up sequence and remove all white space
total_sequence = ''.join(sequence_list)
total_sequence = re.sub(pattern, '', total_sequence)
fasta_key = fasta_header.strip()
contig_dict = dict()
contig_dict["id"] = fasta_key
contig_dict["length"] = len(total_sequence)
contig_dict["name"] = fasta_key
contig_dict["description"] = "Note MD5 is generated from uppercasing the sequence"
contig_md5 = hashlib.md5(total_sequence.upper()).hexdigest()
contig_dict["md5"] = contig_md5
contig_set_md5_list.append(contig_md5)
if contig_set_has_sequences:
contig_dict["sequence"]= total_sequence
else:
contig_dict["sequence"]= ""
fasta_dict[fasta_key] = contig_dict
# get set up for next fasta sequence
sequence_list = []
sequence_exists = False
fasta_header = current_line.replace('>','')
else:
sequence_list.append(current_line)
sequence_exists = True
input_file_handle.close()
# wrap up last fasta sequence
if (not sequence_exists) and first_header_found:
logger.error("There is no sequence related to FASTA record : {0}".format(fasta_header))
raise Exception("There is no sequence related to FASTA record : {0}".format(fasta_header))
else:
# build up sequence and remove all white space
total_sequence = ''.join(sequence_list)
total_sequence = re.sub(pattern, '', total_sequence)
fasta_key = fasta_header.strip()
contig_dict = dict()
contig_dict["id"] = fasta_key
contig_dict["length"] = len(total_sequence)
contig_dict["name"] = fasta_key
contig_dict["description"] = "Note MD5 is generated from uppercasing the sequence"
contig_md5 = hashlib.md5(total_sequence.upper()).hexdigest()
contig_dict["md5"]= contig_md5
contig_set_md5_list.append(contig_md5)
if contig_set_has_sequences:
contig_dict["sequence"] = total_sequence
else:
contig_dict["sequence"]= ""
fasta_dict[fasta_key] = contig_dict
if output_file_name is None:
        # default to the input file name minus its extension, with "_contig_set" appended
base = os.path.basename(input_file_name)
output_file_name = "{0}_contig_set.json".format(os.path.splitext(base)[0])
contig_set_dict = dict()
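    # ContigSet-level MD5 (see the docstring above): the MD5 of the
    # comma-joined, sorted list of per-contig MD5s.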
contig_set_dict["md5"] = hashlib.md5(",".join(sorted(contig_set_md5_list))).hexdigest()
contig_set_dict["id"] = output_file_name
contig_set_dict["name"] = output_file_name
contig_set_dict["source"] = "KBase"
contig_set_dict["source_id"] = os.path.basename(input_file_name)
contig_set_dict["contigs"] = [fasta_dict[x] for x in sorted(fasta_dict.keys())]
if shock_id is None:
shock_info = script_utils.upload_file_to_shock(logger, shock_service_url, input_file_name, token=token)
shock_id = shock_info["id"]
contig_set_dict["fasta_ref"] = shock_id
# For future development if the type is updated to the handle_reference instead of a shock_reference
# This generates the json for the object
objectString = simplejson.dumps(contig_set_dict, sort_keys=True, indent=4)
logger.info("ContigSet data structure creation completed. Writing out JSON.")
output_file_path = os.path.join(working_directory,output_file_name)
with open(output_file_path, "w") as outFile:
outFile.write(objectString)
logger.info("Conversion completed.")
# called only if script is run from command line
if __name__ == "__main__":
script_details = script_utils.parse_docs(transform.__doc__)
import argparse
parser = argparse.ArgumentParser(prog=__file__,
description=script_details["Description"],
epilog=script_details["Authors"])
parser.add_argument('--shock_service_url',
help=script_details["Args"]["shock_service_url"],
action='store', type=str, nargs='?', required=True)
parser.add_argument('--handle_service_url',
help=script_details["Args"]["handle_service_url"],
action='store', type=str, nargs='?', default=None, required=False)
parser.add_argument('--input_directory',
help=script_details["Args"]["input_directory"],
action='store', type=str, nargs='?', required=True)
parser.add_argument('--working_directory',
help=script_details["Args"]["working_directory"],
action='store', type=str, nargs='?', required=True)
parser.add_argument('--output_file_name',
help=script_details["Args"]["output_file_name"],
action='store', type=str, nargs='?', default=None, required=False)
parser.add_argument('--shock_id',
help=script_details["Args"]["shock_id"],
action='store', type=str, nargs='?', default=None, required=False)
parser.add_argument('--handle_id',
help=script_details["Args"]["handle_id"],
action='store', type=str, nargs='?', default=None, required=False)
parser.add_argument('--input_mapping',
help=script_details["Args"]["input_mapping"],
action='store', type=unicode, nargs='?', default=None, required=False)
# Example of a custom argument specific to this uploader
parser.add_argument('--fasta_reference_only',
help=script_details["Args"]["fasta_reference_only"],
action='store_true', required=False)
args, unknown = parser.parse_known_args()
logger = script_utils.stderrlogger(__file__)
try:
transform(shock_service_url = args.shock_service_url,
handle_service_url = args.handle_service_url,
output_file_name = args.output_file_name,
input_directory = args.input_directory,
working_directory = args.working_directory,
shock_id = args.shock_id,
handle_id = args.handle_id,
input_mapping = args.input_mapping,
fasta_reference_only = args.fasta_reference_only,
logger = logger)
except Exception, e:
logger.exception(e)
sys.exit(1)
sys.exit(0)
|
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
General utility functions.
"""
from __future__ import with_statement
import os,sys,socket,types
import random,time
import gzip,zlib,cPickle
import re,struct,array
import subprocess
import warnings
warnings.filterwarnings("ignore","tempnam",RuntimeWarning, __name__)
from config import conf
from data import MTU
from error import log_runtime,log_loading,log_interactive, Scapy_Exception
from base_classes import BasePacketList
WINDOWS=sys.platform.startswith("win32")
###########
## Tools ##
###########
def get_temp_file(keep=False, autoext=""):
f = os.tempnam("","scapy")
if not keep:
conf.temp_files.append(f+autoext)
return f
def sane_color(x):
r=""
for i in x:
j = ord(i)
if (j < 32) or (j >= 127):
r=r+conf.color_theme.not_printable(".")
else:
r=r+i
return r
def sane(x):
r=""
for i in x:
j = ord(i)
if (j < 32) or (j >= 127):
r=r+"."
else:
r=r+i
return r
def lhex(x):
if type(x) in (int,long):
return hex(x)
elif type(x) is tuple:
return "(%s)" % ", ".join(map(lhex, x))
elif type(x) is list:
return "[%s]" % ", ".join(map(lhex, x))
else:
return x
@conf.commands.register
def hexdump(x):
x=str(x)
l = len(x)
i = 0
while i < l:
print "%04x " % i,
for j in xrange(16):
if i+j < l:
print "%02X" % ord(x[i+j]),
else:
print " ",
if j%16 == 7:
print "",
print " ",
print sane_color(x[i:i+16])
i += 16
@conf.commands.register
def linehexdump(x, onlyasc=0, onlyhex=0):
x = str(x)
l = len(x)
if not onlyasc:
for i in xrange(l):
print "%02X" % ord(x[i]),
print "",
if not onlyhex:
print sane_color(x)
def chexdump(x):
x=str(x)
print ", ".join(map(lambda x: "%#04x"%ord(x), x))
def hexstr(x, onlyasc=0, onlyhex=0):
s = []
if not onlyasc:
s.append(" ".join(map(lambda x:"%02x"%ord(x), x)))
if not onlyhex:
s.append(sane(x))
return " ".join(s)
@conf.commands.register
def hexdiff(x,y):
"""Show differences between 2 binary strings"""
x=str(x)[::-1]
y=str(y)[::-1]
SUBST=1
INSERT=1
d={}
d[-1,-1] = 0,(-1,-1)
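    # Fill a Levenshtein-style DP table over the two (reversed) strings; each
    # entry stores (cost, predecessor) so the alignment can be backtracked
    # further below.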
for j in xrange(len(y)):
d[-1,j] = d[-1,j-1][0]+INSERT, (-1,j-1)
for i in xrange(len(x)):
d[i,-1] = d[i-1,-1][0]+INSERT, (i-1,-1)
for j in xrange(len(y)):
for i in xrange(len(x)):
d[i,j] = min( ( d[i-1,j-1][0]+SUBST*(x[i] != y[j]), (i-1,j-1) ),
( d[i-1,j][0]+INSERT, (i-1,j) ),
( d[i,j-1][0]+INSERT, (i,j-1) ) )
backtrackx = []
backtracky = []
i=len(x)-1
j=len(y)-1
while not (i == j == -1):
i2,j2 = d[i,j][1]
backtrackx.append(x[i2+1:i+1])
backtracky.append(y[j2+1:j+1])
i,j = i2,j2
x = y = i = 0
colorize = { 0: lambda x:x,
-1: conf.color_theme.left,
1: conf.color_theme.right }
dox=1
doy=0
l = len(backtrackx)
while i < l:
separate=0
linex = backtrackx[i:i+16]
liney = backtracky[i:i+16]
xx = sum(len(k) for k in linex)
yy = sum(len(k) for k in liney)
if dox and not xx:
dox = 0
doy = 1
if dox and linex == liney:
doy=1
if dox:
xd = y
j = 0
while not linex[j]:
j += 1
xd -= 1
print colorize[doy-dox]("%04x" % xd),
x += xx
line=linex
else:
print " ",
if doy:
yd = y
j = 0
while not liney[j]:
j += 1
yd -= 1
print colorize[doy-dox]("%04x" % yd),
y += yy
line=liney
else:
print " ",
print " ",
cl = ""
for j in xrange(16):
if i+j < l:
if line[j]:
col = colorize[(linex[j]!=liney[j])*(doy-dox)]
print col("%02X" % ord(line[j])),
if linex[j]==liney[j]:
cl += sane_color(line[j])
else:
cl += col(sane(line[j]))
else:
print " ",
cl += " "
else:
print " ",
if j == 7:
print "",
print " ",cl
if doy or not yy:
doy=0
dox=1
i += 16
else:
if yy:
dox=0
doy=1
else:
i += 16
crc32 = zlib.crc32
if struct.pack("H",1) == "\x00\x01": # big endian
def checksum(pkt):
if len(pkt) % 2 == 1:
pkt += "\0"
s = sum(array.array("H", pkt))
s = (s >> 16) + (s & 0xffff)
s += s >> 16
s = ~s
return s & 0xffff
else:
def checksum(pkt):
if len(pkt) % 2 == 1:
pkt += "\0"
s = sum(array.array("H", pkt))
s = (s >> 16) + (s & 0xffff)
s += s >> 16
s = ~s
return (((s>>8)&0xff)|s<<8) & 0xffff
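# Both variants above implement the 16-bit ones'-complement checksum used by
# IP/ICMP/TCP/UDP (RFC 1071); they differ only in the final byte swap, which
# compensates for the host byte order used by array.array("H").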
def _fletcher16(charbuf):
# This is based on the GPLed C implementation in Zebra <http://www.zebra.org/>
c0 = c1 = 0
for char in charbuf:
c0 += ord(char)
c1 += c0
c0 %= 255
c1 %= 255
return (c0,c1)
@conf.commands.register
def fletcher16_checksum(binbuf):
""" Calculates Fletcher-16 checksum of the given buffer.
Note:
If the buffer contains the two checkbytes derived from the Fletcher-16 checksum
the result of this function has to be 0. Otherwise the buffer has been corrupted.
"""
(c0,c1)= _fletcher16(binbuf)
return (c1 << 8) | c0
@conf.commands.register
def fletcher16_checkbytes(binbuf, offset):
""" Calculates the Fletcher-16 checkbytes returned as 2 byte binary-string.
    If these bytes are included in the buffer (at the position marked by offset),
    the global Fletcher-16 checksum of the buffer will be 0, which makes it easy
    to verify the integrity of the buffer on the receiver side.
For details on the algorithm, see RFC 2328 chapter 12.1.7 and RFC 905 Annex B.
"""
# This is based on the GPLed C implementation in Zebra <http://www.zebra.org/>
if len(binbuf) < offset:
raise Exception("Packet too short for checkbytes %d" % len(binbuf))
binbuf = binbuf[:offset] + "\x00\x00" + binbuf[offset + 2:]
(c0,c1)= _fletcher16(binbuf)
x = ((len(binbuf) - offset - 1) * c0 - c1) % 255
if (x <= 0):
x += 255
y = 510 - c0 - x
if (y > 255):
y -= 255
return chr(x) + chr(y)
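# Usage sketch (illustrative only, mirroring the docstrings above): splicing the
# checkbytes back into the buffer at `offset` should make the overall checksum 0:
#   cb = fletcher16_checkbytes(buf, offset)
#   assert fletcher16_checksum(buf[:offset] + cb + buf[offset + 2:]) == 0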
def warning(x):
log_runtime.warning(x)
def mac2str(mac):
return "".join(map(lambda x: chr(int(x,16)), mac.split(":")))
def str2mac(s):
return ("%02x:"*6)[:-1] % tuple(map(ord, s))
def strxor(x,y):
return "".join(map(lambda x,y:chr(ord(x)^ord(y)),x,y))
# Workaround for bug 643005: https://sourceforge.net/tracker/?func=detail&atid=105470&aid=643005&group_id=5470
try:
socket.inet_aton("255.255.255.255")
except socket.error:
def inet_aton(x):
if x == "255.255.255.255":
return "\xff"*4
else:
return socket.inet_aton(x)
else:
inet_aton = socket.inet_aton
inet_ntoa = socket.inet_ntoa
try:
inet_ntop = socket.inet_ntop
inet_pton = socket.inet_pton
except AttributeError:
from scapy.pton_ntop import *
log_loading.info("inet_ntop/pton functions not found. Python IPv6 support not present")
def atol(x):
try:
ip = inet_aton(x)
except socket.error:
ip = inet_aton(socket.gethostbyname(x))
return struct.unpack("!I", ip)[0]
def ltoa(x):
return inet_ntoa(struct.pack("!I", x&0xffffffff))
def itom(x):
return (0xffffffff00000000L>>x)&0xffffffffL
def do_graph(graph,prog=None,format=None,target=None,type=None,string=None,options=None):
"""do_graph(graph, prog=conf.prog.dot, format="svg",
target="| conf.prog.display", options=None, [string=1]):
string: if not None, simply return the graph string
graph: GraphViz graph description
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: filename or redirect. Defaults pipe to Imagemagick's display program
prog: which graphviz program to use
options: options to be passed to prog"""
if format is None:
if WINDOWS:
format = "png" # use common format to make sure a viewer is installed
else:
format = "svg"
if string:
return graph
if type is not None:
format=type
if prog is None:
prog = conf.prog.dot
start_viewer=False
if target is None:
if WINDOWS:
tempfile = os.tempnam("", "scapy") + "." + format
target = "> %s" % tempfile
start_viewer = True
else:
target = "| %s" % conf.prog.display
if format is not None:
format = "-T %s" % format
w,r = os.popen2("%s %s %s %s" % (prog,options or "", format or "", target))
w.write(graph)
w.close()
if start_viewer:
# Workaround for file not found error: We wait until tempfile is written.
waiting_start = time.time()
while not os.path.exists(tempfile):
time.sleep(0.1)
if time.time() - waiting_start > 3:
warning("Temporary file '%s' could not be written. Graphic will not be displayed." % tempfile)
break
else:
if conf.prog.display == conf.prog._default:
os.startfile(tempfile)
else:
subprocess.Popen([conf.prog.display, tempfile])
_TEX_TR = {
"{":"{\\tt\\char123}",
"}":"{\\tt\\char125}",
"\\":"{\\tt\\char92}",
"^":"\\^{}",
"$":"\\$",
"#":"\\#",
"~":"\\~",
"_":"\\_",
"&":"\\&",
"%":"\\%",
"|":"{\\tt\\char124}",
"~":"{\\tt\\char126}",
"<":"{\\tt\\char60}",
">":"{\\tt\\char62}",
}
def tex_escape(x):
s = ""
for c in x:
s += _TEX_TR.get(c,c)
return s
def colgen(*lstcol,**kargs):
"""Returns a generator that mixes provided quantities forever
trans: a function to convert the three arguments into a color. lambda x,y,z:(x,y,z) by default"""
if len(lstcol) < 2:
lstcol *= 2
trans = kargs.get("trans", lambda x,y,z: (x,y,z))
while 1:
for i in xrange(len(lstcol)):
for j in xrange(len(lstcol)):
for k in xrange(len(lstcol)):
if i != j or j != k or k != i:
yield trans(lstcol[(i+j)%len(lstcol)],lstcol[(j+k)%len(lstcol)],lstcol[(k+i)%len(lstcol)])
def incremental_label(label="tag%05i", start=0):
while True:
yield label % start
start += 1
# Python <= 2.5 do not provide bin() built-in function
try:
bin(0)
except NameError:
def _binrepr(val):
while val:
yield val & 1
val >>= 1
binrepr = lambda val: "".join(reversed([str(bit) for bit in
_binrepr(val)])) or "0"
else:
binrepr = lambda val: bin(val)[2:]
#########################
#### Enum management ####
#########################
class EnumElement:
_value=None
def __init__(self, key, value):
self._key = key
self._value = value
def __repr__(self):
return "<%s %s[%r]>" % (self.__dict__.get("_name", self.__class__.__name__), self._key, self._value)
def __getattr__(self, attr):
return getattr(self._value, attr)
def __str__(self):
return self._key
def __eq__(self, other):
return self._value == int(other)
class Enum_metaclass(type):
element_class = EnumElement
def __new__(cls, name, bases, dct):
rdict={}
for k,v in dct.iteritems():
if type(v) is int:
v = cls.element_class(k,v)
dct[k] = v
rdict[v] = k
dct["__rdict__"] = rdict
return super(Enum_metaclass, cls).__new__(cls, name, bases, dct)
def __getitem__(self, attr):
return self.__rdict__[attr]
def __contains__(self, val):
return val in self.__rdict__
def get(self, attr, val=None):
return self.__rdict__.get(attr, val)
def __repr__(self):
return "<%s>" % self.__dict__.get("name", self.__name__)
###################
## Object saving ##
###################
def export_object(obj):
print gzip.zlib.compress(cPickle.dumps(obj,2),9).encode("base64")
def import_object(obj=None):
if obj is None:
obj = sys.stdin.read()
return cPickle.loads(gzip.zlib.decompress(obj.strip().decode("base64")))
def save_object(fname, obj):
cPickle.dump(obj,gzip.open(fname,"wb"))
def load_object(fname):
return cPickle.load(gzip.open(fname,"rb"))
@conf.commands.register
def corrupt_bytes(s, p=0.01, n=None):
"""Corrupt a given percentage or number of bytes from a string"""
s = array.array("B",str(s))
l = len(s)
if n is None:
n = max(1,int(l*p))
for i in random.sample(xrange(l), n):
s[i] = (s[i]+random.randint(1,255))%256
return s.tostring()
@conf.commands.register
def corrupt_bits(s, p=0.01, n=None):
"""Flip a given percentage or number of bits from a string"""
s = array.array("B",str(s))
l = len(s)*8
if n is None:
n = max(1,int(l*p))
for i in random.sample(xrange(l), n):
s[i/8] ^= 1 << (i%8)
return s.tostring()
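# Illustrative use (not from the original source): both helpers are handy for
# quick fuzzing, e.g. corrupt_bits(str(pkt)) flips roughly 1% of the bits of a
# raw packet by default (p=0.01).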
#############################
## pcap capture file stuff ##
#############################
@conf.commands.register
def wrpcap(filename, pkt, *args, **kargs):
"""Write a list of packets to a pcap file
filename: the name of the file to write packets to, or an open,
writable file-like object. The file descriptor will be
closed at the end of the call, so do not use an object you
do not want to close (e.g., running wrpcap(sys.stdout, [])
in interactive mode will crash Scapy).
gz: set to 1 to save a gzipped capture
linktype: force linktype value
endianness: "<" or ">", force endianness
    sync: do not buffer writes to the capture file
"""
with PcapWriter(filename, *args, **kargs) as fdesc:
fdesc.write(pkt)
@conf.commands.register
def rdpcap(filename, count=-1):
"""Read a pcap or pcapng file and return a packet list
count: read only <count> packets
"""
with PcapReader(filename) as fdesc:
return fdesc.read_all(count=count)
class PcapReader_metaclass(type):
"""Metaclass for (Raw)Pcap(Ng)Readers"""
def __new__(cls, name, bases, dct):
"""The `alternative` class attribute is declared in the PcapNg
variant, and set here to the Pcap variant.
"""
newcls = super(PcapReader_metaclass, cls).__new__(cls, name, bases, dct)
if 'alternative' in dct:
dct['alternative'].alternative = newcls
return newcls
def __call__(cls, filename):
"""Creates a cls instance, use the `alternative` if that
fails.
"""
i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
filename, fdesc, magic = cls.open(filename)
try:
i.__init__(filename, fdesc, magic)
except Scapy_Exception:
if "alternative" in cls.__dict__:
cls = cls.__dict__["alternative"]
i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
try:
i.__init__(filename, fdesc, magic)
except Scapy_Exception:
try:
                        fdesc.seek(-4, 1)
except:
pass
raise Scapy_Exception("Not a supported capture file")
return i
@staticmethod
def open(filename):
"""Open (if necessary) filename, and read the magic."""
if isinstance(filename, basestring):
try:
fdesc = gzip.open(filename,"rb")
magic = fdesc.read(4)
except IOError:
fdesc = open(filename,"rb")
magic = fdesc.read(4)
else:
fdesc = filename
filename = (fdesc.name
if hasattr(fdesc, "name") else
"No name")
magic = fdesc.read(4)
return filename, fdesc, magic
class RawPcapReader:
"""A stateful pcap reader. Each packet is returned as a string"""
__metaclass__ = PcapReader_metaclass
def __init__(self, filename, fdesc, magic):
self.filename = filename
self.f = fdesc
if magic == "\xa1\xb2\xc3\xd4": #big endian
self.endian = ">"
elif magic == "\xd4\xc3\xb2\xa1": #little endian
self.endian = "<"
else:
raise Scapy_Exception(
"Not a pcap capture file (bad magic: %r)" % magic
)
hdr = self.f.read(20)
if len(hdr)<20:
raise Scapy_Exception("Invalid pcap file (too short)")
vermaj, vermin, tz, sig, snaplen, linktype = struct.unpack(
self.endian + "HHIIII", hdr
)
self.linktype = linktype
def __iter__(self):
return self
def next(self):
"""implement the iterator protocol on a set of packets in a pcap file"""
pkt = self.read_packet()
        if pkt is None:
raise StopIteration
return pkt
def read_packet(self, size=MTU):
"""return a single packet read from the file
returns None when no more packets are available
"""
hdr = self.f.read(16)
if len(hdr) < 16:
return None
sec,usec,caplen,wirelen = struct.unpack(self.endian+"IIII", hdr)
s = self.f.read(caplen)[:size]
return s,(sec,usec,wirelen) # caplen = len(s)
def dispatch(self, callback):
"""call the specified callback routine for each packet read
        This is just a convenience function for the main loop
that allows for easy launching of packet processing in a
thread.
"""
for p in self:
callback(p)
def read_all(self,count=-1):
"""return a list of all packets in the pcap file
"""
res=[]
while count != 0:
count -= 1
p = self.read_packet()
if p is None:
break
res.append(p)
return res
def recv(self, size=MTU):
""" Emulate a socket
"""
return self.read_packet(size=size)[0]
def fileno(self):
return self.f.fileno()
def close(self):
return self.f.close()
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
self.close()
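# Iteration sketch (hypothetical capture path, not from the original source):
# RawPcapReader yields raw (bytes, (sec, usec, wirelen)) tuples and supports
# the context-manager and iterator protocols defined above.
def _raw_reader_example(path="/tmp/capture.pcap"):
    lengths = []
    with RawPcapReader(path) as reader:
        for payload, (sec, usec, wirelen) in reader:
            lengths.append(wirelen)
    return lengths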
class PcapReader(RawPcapReader):
def __init__(self, filename, fdesc, magic):
RawPcapReader.__init__(self, filename, fdesc, magic)
try:
self.LLcls = conf.l2types[self.linktype]
except KeyError:
warning("PcapReader: unknown LL type [%i]/[%#x]. Using Raw packets" % (self.linktype,self.linktype))
self.LLcls = conf.raw_layer
def read_packet(self, size=MTU):
rp = RawPcapReader.read_packet(self, size=size)
if rp is None:
return None
s,(sec,usec,wirelen) = rp
try:
p = self.LLcls(s)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
p = conf.raw_layer(s)
p.time = sec+0.000001*usec
return p
def read_all(self,count=-1):
res = RawPcapReader.read_all(self, count)
import plist
return plist.PacketList(res,name = os.path.basename(self.filename))
def recv(self, size=MTU):
return self.read_packet(size=size)
class RawPcapNgReader(RawPcapReader):
"""A stateful pcapng reader. Each packet is returned as a
string.
"""
alternative = RawPcapReader
def __init__(self, filename, fdesc, magic):
self.filename = filename
self.f = fdesc
# A list of (linktype, snaplen); will be populated by IDBs.
self.interfaces = []
self.blocktypes = {
1: self.read_block_idb,
6: self.read_block_epb,
}
if magic != "\x0a\x0d\x0d\x0a": # PcapNg:
raise Scapy_Exception(
"Not a pcapng capture file (bad magic: %r)" % magic
)
# see https://github.com/pcapng/pcapng
blocklen, magic = self.f.read(4), self.f.read(4)
if magic == "\x1a\x2b\x3c\x4d":
self.endian = ">"
elif magic == "\x4d\x3c\x2b\x1a":
self.endian = "<"
else:
raise Scapy_Exception("Not a pcapng capture file (bad magic)")
self.f.seek(0)
def read_packet(self, size=MTU):
"""Read blocks until it reaches either EOF or a packet, and
returns None or (packet, (linktype, sec, usec, wirelen)),
where packet is a string.
"""
while True:
try:
blocktype, blocklen = struct.unpack(self.endian + "2I",
self.f.read(8))
except struct.error:
return None
block = self.f.read(blocklen - 12)
try:
if (blocklen,) != struct.unpack(self.endian + 'I',
self.f.read(4)):
raise Scapy_Exception(
"Invalid pcapng block (bad blocklen)"
)
except struct.error:
return None
res = self.blocktypes.get(blocktype,
lambda block, size: None)(block, size)
if res is not None:
return res
def read_block_idb(self, block, _):
"""Interface Description Block"""
self.interfaces.append(struct.unpack(self.endian + "HxxI", block[:8]))
def read_block_epb(self, block, size):
"""Enhanced Packet Block"""
intid, sec, usec, caplen, wirelen = struct.unpack(self.endian + "5I",
block[:20])
return (block[20:20 + caplen][:size],
(self.interfaces[intid][0], sec, usec, wirelen))
class PcapNgReader(RawPcapNgReader):
alternative = PcapReader
def __init__(self, filename, fdesc, magic):
RawPcapNgReader.__init__(self, filename, fdesc, magic)
def read_packet(self, size=MTU):
rp = RawPcapNgReader.read_packet(self, size=size)
if rp is None:
return None
s, (linktype, sec, usec, wirelen) = rp
try:
p = conf.l2types[linktype](s)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
p = conf.raw_layer(s)
p.time = sec+0.000001*usec
return p
def read_all(self,count=-1):
res = RawPcapNgReader.read_all(self, count)
import plist
return plist.PacketList(res, name=os.path.basename(self.filename))
def recv(self, size=MTU):
return self.read_packet()
class RawPcapWriter:
"""A stream PCAP writer with more control than wrpcap()"""
def __init__(self, filename, linktype=None, gz=False, endianness="", append=False, sync=False):
"""
filename: the name of the file to write packets to, or an open,
writable file-like object.
linktype: force linktype to a given value. If None, linktype is taken
                  from the first written packet
gz: compress the capture on the fly
endianness: force an endianness (little:"<", big:">"). Default is native
append: append packets to the capture file instead of truncating it
sync: do not bufferize writes to the capture file
"""
self.linktype = linktype
self.header_present = 0
self.append = append
self.gz = gz
self.endian = endianness
self.sync = sync
bufsz=4096
if sync:
bufsz = 0
if isinstance(filename, basestring):
self.filename = filename
self.f = [open,gzip.open][gz](filename,append and "ab" or "wb", gz and 9 or bufsz)
else:
self.f = filename
self.filename = (filename.name
if hasattr(filename, "name") else
"No name")
def fileno(self):
return self.f.fileno()
def _write_header(self, pkt):
self.header_present=1
if self.append:
            # Even if prone to race conditions, this seems to be the
            # safest way to tell whether the header is already present
# because we have to handle compressed streams that
# are not as flexible as basic files
g = [open,gzip.open][self.gz](self.filename,"rb")
if g.read(16):
return
self.f.write(struct.pack(self.endian+"IHHIIII", 0xa1b2c3d4L,
2, 4, 0, 0, MTU, self.linktype))
self.f.flush()
def write(self, pkt):
"""accepts either a single packet or a list of packets to be
written to the dumpfile
"""
if type(pkt) is str:
if not self.header_present:
self._write_header(pkt)
self._write_packet(pkt)
else:
pkt = pkt.__iter__()
if not self.header_present:
try:
p = pkt.next()
except StopIteration:
self._write_header("")
return
self._write_header(p)
self._write_packet(p)
for p in pkt:
self._write_packet(p)
def _write_packet(self, packet, sec=None, usec=None, caplen=None, wirelen=None):
"""writes a single packet to the pcap file
"""
if caplen is None:
caplen = len(packet)
if wirelen is None:
wirelen = caplen
if sec is None or usec is None:
t=time.time()
it = int(t)
if sec is None:
sec = it
if usec is None:
usec = int(round((t-it)*1000000))
self.f.write(struct.pack(self.endian+"IIII", sec, usec, caplen, wirelen))
self.f.write(packet)
if self.sync:
self.f.flush()
def flush(self):
return self.f.flush()
def close(self):
return self.f.close()
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
self.flush()
self.close()
class PcapWriter(RawPcapWriter):
"""A stream PCAP writer with more control than wrpcap()"""
def _write_header(self, pkt):
        if self.linktype is None:
try:
self.linktype = conf.l2types[pkt.__class__]
except KeyError:
warning("PcapWriter: unknown LL type for %s. Using type 1 (Ethernet)" % pkt.__class__.__name__)
self.linktype = 1
RawPcapWriter._write_header(self, pkt)
def _write_packet(self, packet):
sec = int(packet.time)
usec = int(round((packet.time-sec)*1000000))
s = str(packet)
caplen = len(s)
RawPcapWriter._write_packet(self, s, sec, usec, caplen, caplen)
re_extract_hexcap = re.compile("^((0x)?[0-9a-fA-F]{2,}[ :\t]{,3}|) *(([0-9a-fA-F]{2} {,2}){,16})")
def import_hexcap():
p = ""
try:
while 1:
l = raw_input().strip()
try:
p += re_extract_hexcap.match(l).groups()[2]
except:
warning("Parsing error during hexcap")
continue
except EOFError:
pass
p = p.replace(" ","")
return p.decode("hex")
@conf.commands.register
def wireshark(pktlist):
"""Run wireshark on a list of packets"""
f = get_temp_file()
wrpcap(f, pktlist)
subprocess.Popen([conf.prog.wireshark, "-r", f])
@conf.commands.register
def hexedit(x):
x = str(x)
f = get_temp_file()
open(f,"w").write(x)
subprocess.call([conf.prog.hexedit, f])
x = open(f).read()
os.unlink(f)
return x
def __make_table(yfmtfunc, fmtfunc, endline, list, fxyz, sortx=None, sorty=None, seplinefunc=None):
vx = {}
vy = {}
vz = {}
vxf = {}
vyf = {}
l = 0
for e in list:
xx,yy,zz = map(str, fxyz(e))
l = max(len(yy),l)
vx[xx] = max(vx.get(xx,0), len(xx), len(zz))
vy[yy] = None
vz[(xx,yy)] = zz
vxk = vx.keys()
vyk = vy.keys()
if sortx:
vxk.sort(sortx)
else:
try:
vxk.sort(lambda x,y:int(x)-int(y))
except:
try:
vxk.sort(lambda x,y: cmp(atol(x),atol(y)))
except:
vxk.sort()
if sorty:
vyk.sort(sorty)
else:
try:
vyk.sort(lambda x,y:int(x)-int(y))
except:
try:
vyk.sort(lambda x,y: cmp(atol(x),atol(y)))
except:
vyk.sort()
if seplinefunc:
sepline = seplinefunc(l, map(lambda x:vx[x],vxk))
print sepline
fmt = yfmtfunc(l)
print fmt % "",
for x in vxk:
vxf[x] = fmtfunc(vx[x])
print vxf[x] % x,
print endline
if seplinefunc:
print sepline
for y in vyk:
print fmt % y,
for x in vxk:
print vxf[x] % vz.get((x,y), "-"),
print endline
if seplinefunc:
print sepline
def make_table(*args, **kargs):
__make_table(lambda l:"%%-%is" % l, lambda l:"%%-%is" % l, "", *args, **kargs)
def make_lined_table(*args, **kargs):
__make_table(lambda l:"%%-%is |" % l, lambda l:"%%-%is |" % l, "",
seplinefunc=lambda a,x:"+".join(map(lambda y:"-"*(y+2), [a-1]+x+[-2])),
*args, **kargs)
def make_tex_table(*args, **kargs):
__make_table(lambda l: "%s", lambda l: "& %s", "\\\\", seplinefunc=lambda a,x:"\\hline", *args, **kargs)
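# Example (a sketch, not from the original source): fxyz maps each element to
# an (x, y, z) triple; distinct x values become columns, y values become rows,
# and z fills the cells, so the call below prints a small grid with "-" in the
# cells that have no matching (x, y) pair.
def _make_table_example():
    data = [("eth0", "tx", 10), ("eth0", "rx", 12), ("eth1", "tx", 3)]
    make_table(data, lambda e: (e[0], e[1], e[2]))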
|
|
"""
SoftLayer Cloud Module
======================
The SoftLayer cloud module is used to control access to the SoftLayer VPS
system.
Use of this module only requires the ``apikey`` parameter. Set up the cloud
configuration at:
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/softlayer.conf``:
.. code-block:: yaml
my-softlayer-config:
# SoftLayer account api key
user: MYLOGIN
apikey: JVkbSJDGHSDKUKSDJfhsdklfjgsjdkflhjlsdfffhgdgjkenrtuinv
driver: softlayer
The SoftLayer Python Library needs to be installed in order to use the
SoftLayer salt.cloud modules. See: https://pypi.python.org/pypi/SoftLayer
:depends: softlayer
"""
import logging
import time
import salt.config as config
import salt.utils.cloud
from salt.exceptions import SaltCloudSystemExit
# Attempt to import softlayer lib
try:
import SoftLayer
HAS_SLLIBS = True
except ImportError:
HAS_SLLIBS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = "softlayer"
# Only load in this module if the SoftLayer configurations are in place
def __virtual__():
"""
Check for SoftLayer configurations.
"""
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def _get_active_provider_name():
try:
return __active_provider_name__.value()
except AttributeError:
return __active_provider_name__
def get_configured_provider():
"""
Return the first configured instance.
"""
return config.is_provider_configured(
__opts__, _get_active_provider_name() or __virtualname__, ("apikey",)
)
def get_dependencies():
"""
Warn if dependencies aren't met.
"""
return config.check_driver_dependencies(__virtualname__, {"softlayer": HAS_SLLIBS})
def script(vm_):
"""
Return the script deployment object
"""
deploy_script = salt.utils.cloud.os_script(
config.get_cloud_config_value("script", vm_, __opts__),
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
),
)
return deploy_script
def get_conn(service="SoftLayer_Virtual_Guest"):
"""
Return a conn object for the passed VM data
"""
client = SoftLayer.Client(
username=config.get_cloud_config_value(
"user", get_configured_provider(), __opts__, search_global=False
),
api_key=config.get_cloud_config_value(
"apikey", get_configured_provider(), __opts__, search_global=False
),
)
return client[service]
def avail_locations(call=None):
"""
List all available locations
"""
if call == "action":
raise SaltCloudSystemExit(
"The avail_locations function must be called with "
"-f or --function, or with the --list-locations option"
)
ret = {}
conn = get_conn()
response = conn.getCreateObjectOptions()
# return response
for datacenter in response["datacenters"]:
# return data center
ret[datacenter["template"]["datacenter"]["name"]] = {
"name": datacenter["template"]["datacenter"]["name"],
}
return ret
def avail_sizes(call=None):
"""
Return a dict of all available VM sizes on the cloud provider with
relevant data. This data is provided in three dicts.
"""
if call == "action":
raise SaltCloudSystemExit(
"The avail_sizes function must be called with "
"-f or --function, or with the --list-sizes option"
)
ret = {
"block devices": {},
"memory": {},
"processors": {},
}
conn = get_conn()
response = conn.getCreateObjectOptions()
for device in response["blockDevices"]:
# return device['template']['blockDevices']
ret["block devices"][device["itemPrice"]["item"]["description"]] = {
"name": device["itemPrice"]["item"]["description"],
"capacity": device["template"]["blockDevices"][0]["diskImage"]["capacity"],
}
for memory in response["memory"]:
ret["memory"][memory["itemPrice"]["item"]["description"]] = {
"name": memory["itemPrice"]["item"]["description"],
"maxMemory": memory["template"]["maxMemory"],
}
for processors in response["processors"]:
ret["processors"][processors["itemPrice"]["item"]["description"]] = {
"name": processors["itemPrice"]["item"]["description"],
"start cpus": processors["template"]["startCpus"],
}
return ret
def avail_images(call=None):
"""
Return a dict of all available VM images on the cloud provider.
"""
if call == "action":
raise SaltCloudSystemExit(
"The avail_images function must be called with "
"-f or --function, or with the --list-images option"
)
ret = {}
conn = get_conn()
response = conn.getCreateObjectOptions()
for image in response["operatingSystems"]:
ret[image["itemPrice"]["item"]["description"]] = {
"name": image["itemPrice"]["item"]["description"],
"template": image["template"]["operatingSystemReferenceCode"],
}
return ret
def list_custom_images(call=None):
"""
Return a dict of all custom VM images on the cloud provider.
"""
if call != "function":
raise SaltCloudSystemExit(
"The list_vlans function must be called with -f or --function."
)
ret = {}
conn = get_conn("SoftLayer_Account")
response = conn.getBlockDeviceTemplateGroups()
for image in response:
if "globalIdentifier" not in image:
continue
ret[image["name"]] = {
"id": image["id"],
"name": image["name"],
"globalIdentifier": image["globalIdentifier"],
}
if "note" in image:
ret[image["name"]]["note"] = image["note"]
return ret
def get_location(vm_=None):
"""
Return the location to use, in this order:
- CLI parameter
- VM parameter
- Cloud profile setting
"""
return __opts__.get(
"location",
config.get_cloud_config_value(
"location",
vm_ or get_configured_provider(),
__opts__,
# default=DEFAULT_LOCATION,
search_global=False,
),
)
def create(vm_):
"""
Create a single VM from a data dict
"""
try:
# Check for required profile parameters before sending any API calls.
if (
vm_["profile"]
and config.is_profile_configured(
__opts__,
_get_active_provider_name() or "softlayer",
vm_["profile"],
vm_=vm_,
)
is False
):
return False
except AttributeError:
pass
name = vm_["name"]
hostname = name
domain = config.get_cloud_config_value("domain", vm_, __opts__, default=None)
if domain is None:
        raise SaltCloudSystemExit("A domain name is required for the SoftLayer driver.")
if vm_.get("use_fqdn"):
name = ".".join([name, domain])
vm_["name"] = name
__utils__["cloud.fire_event"](
"event",
"starting create",
"salt/cloud/{}/creating".format(name),
args=__utils__["cloud.filter_event"](
"creating", vm_, ["name", "profile", "provider", "driver"]
),
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
log.info("Creating Cloud VM %s", name)
conn = get_conn()
kwargs = {
"hostname": hostname,
"domain": domain,
"startCpus": vm_["cpu_number"],
"maxMemory": vm_["ram"],
"hourlyBillingFlag": vm_["hourly_billing"],
}
local_disk_flag = config.get_cloud_config_value(
"local_disk", vm_, __opts__, default=False
)
kwargs["localDiskFlag"] = local_disk_flag
if "image" in vm_:
kwargs["operatingSystemReferenceCode"] = vm_["image"]
kwargs["blockDevices"] = []
disks = vm_["disk_size"]
if isinstance(disks, int):
disks = [str(disks)]
elif isinstance(disks, str):
disks = [size.strip() for size in disks.split(",")]
count = 0
for disk in disks:
# device number '1' is reserved for the SWAP disk
if count == 1:
count += 1
block_device = {
"device": str(count),
"diskImage": {"capacity": str(disk)},
}
kwargs["blockDevices"].append(block_device)
count += 1
# Upper bound must be 5 as we're skipping '1' for the SWAP disk ID
if count > 5:
log.warning(
"More that 5 disks were specified for %s ."
"The first 5 disks will be applied to the VM, "
"but the remaining disks will be ignored.\n"
"Please adjust your cloud configuration to only "
"specify a maximum of 5 disks.",
name,
)
break
elif "global_identifier" in vm_:
kwargs["blockDeviceTemplateGroup"] = {
"globalIdentifier": vm_["global_identifier"]
}
location = get_location(vm_)
if location:
kwargs["datacenter"] = {"name": location}
private_vlan = config.get_cloud_config_value(
"private_vlan", vm_, __opts__, default=False
)
if private_vlan:
kwargs["primaryBackendNetworkComponent"] = {"networkVlan": {"id": private_vlan}}
private_network = config.get_cloud_config_value(
"private_network", vm_, __opts__, default=False
)
if bool(private_network) is True:
kwargs["privateNetworkOnlyFlag"] = "True"
public_vlan = config.get_cloud_config_value(
"public_vlan", vm_, __opts__, default=False
)
if public_vlan:
kwargs["primaryNetworkComponent"] = {"networkVlan": {"id": public_vlan}}
public_security_groups = config.get_cloud_config_value(
"public_security_groups", vm_, __opts__, default=False
)
if public_security_groups:
secgroups = [
{"securityGroup": {"id": int(sg)}} for sg in public_security_groups
]
pnc = kwargs.get("primaryNetworkComponent", {})
pnc["securityGroupBindings"] = secgroups
kwargs.update({"primaryNetworkComponent": pnc})
private_security_groups = config.get_cloud_config_value(
"private_security_groups", vm_, __opts__, default=False
)
if private_security_groups:
secgroups = [
{"securityGroup": {"id": int(sg)}} for sg in private_security_groups
]
pbnc = kwargs.get("primaryBackendNetworkComponent", {})
pbnc["securityGroupBindings"] = secgroups
kwargs.update({"primaryBackendNetworkComponent": pbnc})
max_net_speed = config.get_cloud_config_value(
"max_net_speed", vm_, __opts__, default=10
)
if max_net_speed:
kwargs["networkComponents"] = [{"maxSpeed": int(max_net_speed)}]
post_uri = config.get_cloud_config_value("post_uri", vm_, __opts__, default=None)
if post_uri:
kwargs["postInstallScriptUri"] = post_uri
dedicated_host_id = config.get_cloud_config_value(
"dedicated_host_id", vm_, __opts__, default=None
)
if dedicated_host_id:
kwargs["dedicatedHost"] = {"id": dedicated_host_id}
__utils__["cloud.fire_event"](
"event",
"requesting instance",
"salt/cloud/{}/requesting".format(name),
args={
"kwargs": __utils__["cloud.filter_event"](
"requesting", kwargs, list(kwargs)
),
},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
try:
response = conn.createObject(kwargs)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Error creating %s on SoftLayer\n\n"
"The following exception was thrown when trying to "
"run the initial deployment: \n%s",
name,
exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG,
)
return False
ip_type = "primaryIpAddress"
private_ssh = config.get_cloud_config_value(
"private_ssh", vm_, __opts__, default=False
)
private_wds = config.get_cloud_config_value(
"private_windows", vm_, __opts__, default=False
)
if private_ssh or private_wds or public_vlan is None:
ip_type = "primaryBackendIpAddress"
def wait_for_ip():
"""
Wait for the IP address to become available
"""
nodes = list_nodes_full()
if ip_type in nodes[hostname]:
return nodes[hostname][ip_type]
time.sleep(1)
return False
ip_address = salt.utils.cloud.wait_for_fun(
wait_for_ip,
timeout=config.get_cloud_config_value(
"wait_for_fun_timeout", vm_, __opts__, default=15 * 60
),
)
if config.get_cloud_config_value("deploy", vm_, __opts__) is not True:
return show_instance(hostname, call="action")
SSH_PORT = 22
WINDOWS_DS_PORT = 445
managing_port = SSH_PORT
if config.get_cloud_config_value(
"windows", vm_, __opts__
) or config.get_cloud_config_value("win_installer", vm_, __opts__):
managing_port = WINDOWS_DS_PORT
ssh_connect_timeout = config.get_cloud_config_value(
"ssh_connect_timeout", vm_, __opts__, 15 * 60
)
connect_timeout = config.get_cloud_config_value(
"connect_timeout", vm_, __opts__, ssh_connect_timeout
)
if not salt.utils.cloud.wait_for_port(
ip_address, port=managing_port, timeout=connect_timeout
):
raise SaltCloudSystemExit("Failed to authenticate against remote ssh")
pass_conn = get_conn(service="SoftLayer_Account")
mask = {
"virtualGuests": {"powerState": "", "operatingSystem": {"passwords": ""}},
}
def get_credentials():
"""
Wait for the password to become available
"""
node_info = pass_conn.getVirtualGuests(id=response["id"], mask=mask)
for node in node_info:
if (
node["id"] == response["id"]
and "passwords" in node["operatingSystem"]
and node["operatingSystem"]["passwords"]
):
return (
node["operatingSystem"]["passwords"][0]["username"],
node["operatingSystem"]["passwords"][0]["password"],
)
time.sleep(5)
return False
username, passwd = salt.utils.cloud.wait_for_fun( # pylint: disable=W0633
get_credentials,
timeout=config.get_cloud_config_value(
"wait_for_fun_timeout", vm_, __opts__, default=15 * 60
),
)
response["username"] = username
response["password"] = passwd
response["public_ip"] = ip_address
ssh_username = config.get_cloud_config_value(
"ssh_username", vm_, __opts__, default=username
)
vm_["ssh_host"] = ip_address
vm_["password"] = passwd
ret = __utils__["cloud.bootstrap"](vm_, __opts__)
ret.update(response)
__utils__["cloud.fire_event"](
"event",
"created instance",
"salt/cloud/{}/created".format(name),
args=__utils__["cloud.filter_event"](
"created", vm_, ["name", "profile", "provider", "driver"]
),
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
return ret
def list_nodes_full(mask="mask[id]", call=None):
"""
Return a list of the VMs that are on the provider
"""
if call == "action":
raise SaltCloudSystemExit(
"The list_nodes_full function must be called with -f or --function."
)
ret = {}
conn = get_conn(service="SoftLayer_Account")
response = conn.getVirtualGuests()
for node_id in response:
hostname = node_id["hostname"]
ret[hostname] = node_id
__utils__["cloud.cache_node_list"](
ret, _get_active_provider_name().split(":")[0], __opts__
)
return ret
def list_nodes(call=None):
"""
Return a list of the VMs that are on the provider
"""
if call == "action":
raise SaltCloudSystemExit(
"The list_nodes function must be called with -f or --function."
)
ret = {}
nodes = list_nodes_full()
if "error" in nodes:
raise SaltCloudSystemExit(
"An error occurred while listing nodes: {}".format(
nodes["error"]["Errors"]["Error"]["Message"]
)
)
for node in nodes:
ret[node] = {
"id": nodes[node]["hostname"],
"ram": nodes[node]["maxMemory"],
"cpus": nodes[node]["maxCpu"],
}
if "primaryIpAddress" in nodes[node]:
ret[node]["public_ips"] = nodes[node]["primaryIpAddress"]
if "primaryBackendIpAddress" in nodes[node]:
ret[node]["private_ips"] = nodes[node]["primaryBackendIpAddress"]
if "status" in nodes[node]:
ret[node]["state"] = str(nodes[node]["status"]["name"])
return ret
def list_nodes_select(call=None):
"""
Return a list of the VMs that are on the provider, with select fields
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__["query.selection"],
call,
)
def show_instance(name, call=None):
"""
Show the details from SoftLayer concerning a guest
"""
if call != "action":
raise SaltCloudSystemExit(
"The show_instance action must be called with -a or --action."
)
nodes = list_nodes_full()
__utils__["cloud.cache_node"](nodes[name], _get_active_provider_name(), __opts__)
return nodes[name]
def destroy(name, call=None):
"""
Destroy a node.
CLI Example:
.. code-block:: bash
salt-cloud --destroy mymachine
"""
if call == "function":
raise SaltCloudSystemExit(
"The destroy action must be called with -d, --destroy, -a or --action."
)
__utils__["cloud.fire_event"](
"event",
"destroying instance",
"salt/cloud/{}/destroying".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
node = show_instance(name, call="action")
conn = get_conn()
response = conn.deleteObject(id=node["id"])
__utils__["cloud.fire_event"](
"event",
"destroyed instance",
"salt/cloud/{}/destroyed".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
if __opts__.get("update_cachedir", False) is True:
__utils__["cloud.delete_minion_cachedir"](
name, _get_active_provider_name().split(":")[0], __opts__
)
return response
def list_vlans(call=None):
"""
List all VLANs associated with the account
"""
if call != "function":
raise SaltCloudSystemExit(
"The list_vlans function must be called with -f or --function."
)
conn = get_conn(service="SoftLayer_Account")
return conn.getNetworkVlans()
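# Example invocations (a sketch; the provider and profile names below are
# hypothetical and must match your own cloud configuration):
#   salt-cloud --list-locations my-softlayer-config
#   salt-cloud --list-sizes my-softlayer-config
#   salt-cloud --list-images my-softlayer-config
#   salt-cloud -f list_vlans my-softlayer-config
#   salt-cloud -p my-softlayer-profile mymachine
#   salt-cloud -d mymachine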
|
|
import sys
import time
import os
import serial
import random
import RPi.GPIO as GPIO
import Adafruit_MPR121.MPR121 as MPR121
import threading
from threading import Thread
flag1 = 0
flag2 = 0
randomInt = 0
ser = serial.Serial('/dev/ttyACM0', 9600)
#Defining audio files
#darkSide
def bidding():
global flag1
os.system('aplay audio/darkSide/bidding.wav')
flag1 = 0
def breath():
global flag1
os.system('aplay audio/darkSide/breath.wav')
flag1 = 0
def darkside():
global flag1
os.system('aplay audio/darkSide/darkside.wav')
flag1 = 0
def DarthLsbr():
global flag1
os.system('aplay audio/darkSide/DarthLsbr.wav')
flag1 = 0
def dontfail():
global flag1
os.system('aplay audio/darkSide/dontfail.wav')
flag1 = 0
def failed():
global flag1
os.system('aplay audio/darkSide/failed.wav')
flag1 = 0
def father():
global flag1
os.system('aplay audio/darkSide/father.wav')
flag1 = 0
def haveyou():
global flag1
os.system('aplay audio/darkSide/haveyou.wav')
flag1 = 0
def noescape():
global flag1
os.system('aplay audio/darkSide/noescape.wav')
flag1 = 0
def power():
global flag1
os.system('aplay audio/darkSide/power.wav')
flag1 = 0
def theforce():
global flag1
os.system('aplay audio/darkSide/theforce.wav')
flag1 = 0
def vader():
global flag1
os.system('aplay audio/darkSide/vader.wav')
flag1 = 0
def waiting():
global flag1
os.system('aplay audio/darkSide/waiting.wav')
flag1 = 0
#empire
def imperial():
global flag1
os.system('aplay audio/empire/imperial.wav')
flag1 = 0
#lightSide
def impossible():
global flag1
os.system('aplay audio/lightSide/impossible.wav')
flag1 = 0
def LukeLsbr():
global flag1
os.system('aplay audio/lightSide/LukeLsbr.wav')
flag1 = 0
def yoda():
global flag1
os.system('aplay audio/lightSide/yoda.wav')
flag1 = 0
def disturb():
global flag1
os.system('aplay audio/lightSide/disturb.wav')
flag1 = 0
def force1():
global flag1
os.system('aplay audio/lightSide/force1.wav')
flag1 = 0
def force2():
global flag1
os.system('aplay audio/lightSide/force2.wav')
flag1 = 0
def strongam():
global flag1
os.system('aplay audio/lightSide/strongam.wav')
flag1 = 0
def try_not():
global flag1
os.system('aplay audio/lightSide/try_not.wav')
flag1 = 0
#logo
def mainthemeshort():
global flag1
os.system('aplay audio/logo/mainthemeshort.wav')
flag1 = 0
#rebel
def c3po():
global flag1
os.system('aplay audio/rebel/c3po.wav')
flag1 = 0
def helpme():
global flag1
os.system('aplay audio/rebel/helpme.wav')
flag1 = 0
def r2d2_1():
global flag1
os.system('aplay audio/rebel/r2d2_1.wav')
flag1 = 0
def r2d2_2():
global flag1
os.system('aplay audio/rebel/r2d2_2.wav')
flag1 = 0
def roar():
global flag1
os.system('aplay audio/rebel/roar.wav')
flag1 = 0
def stuck_up():
global flag1
os.system('aplay audio/rebel/stuck_up.wav')
flag1 = 0
def thankme():
global flag1
os.system('aplay audio/rebel/thankme.wav')
flag1 = 0
#Defining Light functions
#darkSide
def biddingLight():
global flag2
ser.write('a')
flag2 = 0
def breathLight():
global flag2
ser.write('b')
flag2 = 0
def darksideLight():
global flag2
ser.write('c')
flag2 = 0
def DarthLsbrLight():
global flag2
ser.write('d')
flag2 = 0
def dontfailLight():
global flag2
ser.write('e')
flag2 = 0
def failedLight():
global flag2
ser.write('f')
flag2 = 0
def fatherLight():
global flag2
ser.write('g')
flag2 = 0
def haveyouLight():
global flag2
ser.write('h')
flag2 = 0
def noescapeLight():
global flag2
ser.write('i')
flag2 = 0
def powerLight():
global flag2
ser.write('j')
flag2 = 0
def theforceLight():
global flag2
ser.write('k')
flag2 = 0
def vaderLight():
global flag2
ser.write('l')
flag2 = 0
def waitingLight():
global flag2
ser.write('m')
flag2 = 0
#empire
def imperialLight():
global flag2
ser.write('n')
flag2 = 0
#lightSide
def impossibleLight():
global flag2
ser.write('o')
flag2 = 0
def LukeLsbrLight():
global flag2
ser.write('p')
flag2 = 0
def yodaLight():
global flag2
ser.write('q')
flag2 = 0
def disturbLight():
global flag2
ser.write('r')
flag2 = 0
def force1Light():
global flag2
ser.write('s')
flag2 = 0
def force2Light():
global flag2
ser.write('t')
flag2 = 0
def strongamLight():
global flag2
ser.write('u')
flag2 = 0
def try_notLight():
global flag2
ser.write('v')
flag2 = 0
#logo
def mainthemeshortLight():
global flag2
ser.write('w')
flag2 = 0
#rebel
def c3poLight():
global flag2
ser.write('x')
flag2 = 0
def helpmeLight():
global flag2
ser.write('y')
flag2 = 0
def r2d2_1Light():
global flag2
ser.write('z')
flag2 = 0
def r2d2_2Light():
global flag2
ser.write('{')
flag2 = 0
def roarLight():
global flag2
ser.write('|')
flag2 = 0
def stuck_upLight():
global flag2
ser.write('}')
flag2 = 0
def thankmeLight():
global flag2
ser.write('~')
flag2 = 0
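# A possible refactor sketch (not part of the original script): every clip has
# the same play/light pattern, so a dispatch table keyed by category could
# replace the per-clip functions above. Only two entries are listed here as an
# illustration, reusing paths and serial codes that already appear above.
SOUND_TABLE_EXAMPLE = {
    'darkSide': [('audio/darkSide/bidding.wav', 'a'),
                 ('audio/darkSide/breath.wav', 'b')],
}
def _play_clip_example(wav):
    global flag1
    os.system('aplay ' + wav)
    flag1 = 0
def _send_light_example(code):
    global flag2
    ser.write(code)
    flag2 = 0
def play_random_example(category):
    wav, code = random.choice(SOUND_TABLE_EXAMPLE[category])
    Thread(target=_play_clip_example, args=(wav,)).start()
    Thread(target=_send_light_example, args=(code,)).start()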
print('Adafruit MPR121 Capacitive Touch Sensor Test')
# Create MPR121 instance.
cap = MPR121.MPR121()
if not cap.begin():
print('Error initializing MPR121. Check your wiring!')
sys.exit(1)
# Main loop: when a touch pad is pressed, pick a clip for that category and
# start the audio playback and the matching serial light command in threads.
print('Press Ctrl-C to quit.')
last_touched = cap.touched()
while True:
current_touched = cap.touched()
time.sleep(0.25)
randomInt = 0
#darkSide
if cap.is_touched(8) and flag1 == 0 and flag2 == 0:
print('Pin 8 is being touched!')
flag1 = 1
flag2 = 1
#ser.write('1-13')
randomInt = random.randint(1, 13)
print(randomInt)
#Thread(target = yoda).start()
#Thread(target = greenLight).start()
if randomInt == 1:
Thread(target = bidding).start()
Thread(target = biddingLight).start()
elif randomInt == 2:
Thread(target = breath).start()
Thread(target = breathLight).start()
elif randomInt == 3:
Thread(target = darkside).start()
Thread(target = darksideLight).start()
elif randomInt == 4:
Thread(target = DarthLsbr).start()
Thread(target = DarthLsbrLight).start()
elif randomInt == 5:
Thread(target = dontfail).start()
Thread(target = dontfailLight).start()
elif randomInt == 6:
Thread(target = failed).start()
Thread(target = failedLight).start()
elif randomInt == 7:
Thread(target = father).start()
Thread(target = fatherLight).start()
elif randomInt == 8:
Thread(target = haveyou).start()
Thread(target = haveyouLight).start()
elif randomInt == 9:
Thread(target = noescape).start()
Thread(target = noescapeLight).start()
elif randomInt == 10:
Thread(target = power).start()
Thread(target = powerLight).start()
elif randomInt == 11:
Thread(target = theforce).start()
Thread(target = theforceLight).start()
elif randomInt == 12:
Thread(target = vader).start()
Thread(target = vaderLight).start()
elif randomInt == 13:
Thread(target = waiting).start()
Thread(target = waitingLight).start()
#empire
if cap.is_touched(5) and flag1 == 0 and flag2 == 0:
print('Pin 5 is being touched!')
flag1 = 1
flag2 = 1
#ser.write('14')
Thread(target = imperial).start()
Thread(target = imperialLight).start()
#lightSide
if cap.is_touched(11) and flag1 == 0 and flag2 == 0:
print('Pin 11 is being touched!')
flag1 = 1
flag2 = 1
#ser.write('15-22')
randomInt = random.randint(15, 22)
print(randomInt)
#Thread(target = logo).start()
#Thread(target = logoLight).start()
if randomInt == 15:
Thread(target = impossible).start()
Thread(target = impossibleLight).start()
elif randomInt == 16:
Thread(target = LukeLsbr).start()
Thread(target = LukeLsbrLight).start()
elif randomInt == 17:
Thread(target = yoda).start()
Thread(target = yodaLight).start()
elif randomInt == 18:
Thread(target = disturb).start()
Thread(target = disturbLight).start()
elif randomInt == 19:
Thread(target = force1).start()
Thread(target = force1Light).start()
elif randomInt == 20:
Thread(target = force2).start()
Thread(target = force2Light).start()
elif randomInt == 21:
Thread(target = strongam).start()
Thread(target = strongamLight).start()
elif randomInt == 22:
Thread(target = try_not).start()
Thread(target = try_notLight).start()
#logo
if cap.is_touched(0) and flag1 == 0 and flag2 == 0:
print('Pin 0 is being touched!')
flag1 = 1
flag2 = 1
#ser.write('23')
Thread(target = mainthemeshort).start()
Thread(target = mainthemeshortLight).start()
#rebel
if cap.is_touched(4) and flag1 == 0 and flag2 == 0:
print('Pin 4 is being touched!')
flag1 = 1
flag2 = 1
#ser.write('24-30')
randomInt = random.randint(24, 30)
print(randomInt)
#Thread(target = vaderBreathing).start()
#Thread(target = redLight).start()
if randomInt == 24:
Thread(target = c3po).start()
Thread(target = c3poLight).start()
elif randomInt == 25:
Thread(target = helpme).start()
Thread(target = helpmeLight).start()
elif randomInt == 26:
Thread(target = r2d2_1).start()
Thread(target = r2d2_1Light).start()
elif randomInt == 27:
Thread(target = r2d2_2).start()
Thread(target = r2d2_2Light).start()
elif randomInt == 28:
Thread(target = roar).start()
Thread(target = roarLight).start()
elif randomInt == 29:
Thread(target = stuck_up).start()
Thread(target = stuck_upLight).start()
elif randomInt == 30:
Thread(target = thankme).start()
Thread(target = thankmeLight).start()
|
|
# Alan Richardson (Ausar Geophysical)
# 2017/01/31
import numpy as np
import scipy.signal
import pandas as pd
from sklearn import preprocessing, metrics
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.base import clone
from matplotlib import pyplot as plt
import scipy.optimize
from scipy.optimize import lsq_linear
import fastdtw
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import lsqr
from scipy.signal import medfilt, gaussian
import xgboost as xgb
from xgboost.sklearn import XGBClassifier, XGBRegressor
eps = 1e-5
def load_data():
    train_data = pd.read_csv('../facies_vectors.csv')
train_data = train_data[train_data['Well Name'] != 'Recruit F9'].reset_index(drop=True)
validation_data = pd.read_csv('../validation_data_nofacies.csv')
return pd.concat([train_data, validation_data]).reset_index(drop=True)
def get_wellnames(data):
return data['Well Name'].unique()
def get_numwells(data):
return len(get_wellnames(data))
def set_well_value(data, wellname, colname, val):
data.loc[data['Well Name']==wellname, colname] = val
def get_well_value(data, wellname, colname):
return data.loc[data['Well Name']==wellname, colname].values[0]
def make_label_encoders(data, names):
les=[]
for name in names:
le=preprocessing.LabelEncoder()
le.fit(data[name])
les.append({'name': name, 'le': le})
return les
def apply_label_encoders(data, les):
for le in les:
data['%sClass' % le['name']]=le['le'].transform(data[le['name']])
data.drop(le['name'], axis=1, inplace=True)
def make_onehot_encoders(data, names):
ohes = []
for name in names:
ohe=preprocessing.OneHotEncoder()
ohe.fit(data[name])
ohes.append({'name': name, 'ohe': ohe})
return ohes
def apply_onehot_encoders(data, ohes):
for ohe in ohes:
ohdata = pd.DataFrame(ohe['ohe'].transform(data[ohe['name']]).toarray())
data=data.join(ohdata)
data.drop(ohe['name'],axis=1,inplace=True)
return data
def make_scalers(data, names, stype='Robust'):
scalers = []
for name in names:
if (stype == 'Robust'):
scaler = preprocessing.RobustScaler()
elif (stype == 'Standard'):
scaler = preprocessing.StandardScaler()
else:
raise ValueError('unknown stype')
scaler.fit(data[name].dropna(axis=0, inplace=False).values.reshape(-1, 1))
scalers.append({'name': name, 'scaler': scaler})
return scalers
def apply_scalers(data, scalers):
for scaler in scalers:
data.loc[~data[scaler['name']].isnull(), scaler['name']] = scaler['scaler'].transform(data[scaler['name']].dropna(axis=0, inplace=False).values.reshape(-1,1))
def neigh_interp(data):
odata = load_data()
wellnames = get_wellnames(data)
formations = data['FormationClass'].unique()
distformation=np.load('dtw_distformation_fce.npy')
distformation[pd.isnull(distformation)]=0.0
# distformation is upper triangular, so add the transpose to make it full
distformationf = np.zeros([len(wellnames),len(wellnames),len(formations)])
for fidx in range(len(formations)):
distformationf[:,:,fidx] = distformation[:,:,fidx]+distformation[:,:,fidx].T
# We don't have facies for wells 9 or 10, so we don't want any other well
# to have these as one of their nearest neighbours
distformationf[:,9,:]=np.inf
distformationf[:,10,:]=np.inf
# We also don't want a well to be its own neighbour
distformationf[distformationf==0.0]=np.inf
data['NeighbFacies']=0
k=8
clf = KNeighborsClassifier(n_neighbors = k, weights = 'distance', leaf_size = 1, p = 1)
cols = ['GR', 'ILD_log10', 'PHIND', 'RELPOS', 'NM_MClass', 'RGT']
for wellidx in range(len(wellnames)):
for fidx in formations:
# Find the k 'nearest' (as determined by dtw) wells
neighb = np.argsort(distformationf[wellidx,:,formations.tolist().index(fidx)])[:k]
# Find the rows in data for these wells
neighb_rows = np.array([False]*len(data))
for nidx in neighb:
neighb_rows = neighb_rows | (data['Well Name']==wellnames[nidx])
# Select only those rows with formation 'fidx'
neighb_rows = neighb_rows & (data['FormationClass']==fidx)
# Rows for the chosen formation in the current well
my_rows = (data['Well Name']==wellnames[wellidx]) & (data['FormationClass']==fidx)
# Fit and predict
if (np.sum(neighb_rows)>0) & (np.sum(my_rows)>0):
clf.fit(data.loc[neighb_rows, cols].values, odata.loc[neighb_rows, 'Facies'].values.ravel())
data.loc[my_rows, 'NeighbFacies'] = clf.predict(data.loc[my_rows, cols].values)
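# A small numeric illustration (a sketch, not from the original source) of the
# neighbour masking above: symmetrise an upper-triangular distance matrix,
# forbid self matches with np.inf, and take the k nearest wells per row.
def _nearest_wells_example(k=2):
    upper = np.array([[0., 3., 1.],
                      [0., 0., 2.],
                      [0., 0., 0.]])
    full = upper + upper.T
    full[full == 0.0] = np.inf  # a well should never be its own neighbour
    return np.argsort(full, axis=1)[:, :k]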
# Start of functions associated with finding RGT
def get_pts_per_well(data):
npts_in_well = data.groupby('Well Name', sort=False).size().values
cum_pts = np.append([0],np.cumsum(npts_in_well))
return npts_in_well, cum_pts
def build_Adtw(data, wells, nwells, npts_in_well, cum_pts, cols):
formations = data['FormationClass'].unique()
max_num_pairs = int(nwells * (nwells-1) / 2 * np.max(npts_in_well))
max_nz_in_row = int(np.max(npts_in_well) * 2)
max_num_rows = max_num_pairs
max_num_nonzero = max_num_rows * max_nz_in_row
dist = np.zeros([len(wells),len(wells)])
distformation = np.zeros([len(wells),len(wells),len(formations)])
indices = np.zeros(max_num_nonzero,dtype=int)
indptr = np.zeros(max_num_rows+1,dtype=int)
Adata = np.zeros(max_num_nonzero)
b = np.zeros(max_num_rows)
bounds = np.ones(len(data))
nz_rows = 0
nz_indices = 0
def add_shift_sum(Adata, indices, nz_indices, i, path, cum_pts, wellidx, idx):
col0 = cum_pts[wellidx]
col1 = cum_pts[wellidx] + path[i][idx]
num_added_indices = col1 - col0 + 1
indices[nz_indices:nz_indices+num_added_indices] = np.arange(col0, col1+1)
#1-2*idx so when idx=0 put +1 in Adata, when idx=1 put -1 in Adata
Adata[nz_indices:nz_indices+num_added_indices] = np.ones(num_added_indices)*(1-2*idx)
return num_added_indices
def add_row (Adata, indices, indptr, b, nz_rows, nz_indices, i, path, cum_pts, wellidxs):
num_added_indices = 0
indptr[nz_rows] = nz_indices
for idx in [0,1]:
num_added_indices = add_shift_sum(Adata, indices, nz_indices, i, path, cum_pts, wellidxs[idx], idx)
nz_indices = nz_indices + num_added_indices
b[nz_rows] = 0.0
return nz_indices
weightsum = 0.0
for well1idx in range(nwells-1):
for well2idx in range(well1idx+1, nwells):
w1df = data.loc[data['Well Name'] == wells[well1idx], cols + ['FormationClass']]
w2df = data.loc[data['Well Name'] == wells[well2idx], cols + ['FormationClass']]
w1formations = w1df['FormationClass'].unique()
w2formations = w2df['FormationClass'].unique()
nzcols = []
path = []
for col in cols:
if (np.all(np.isfinite(w1df[col])) & np.all(np.isfinite(w2df[col]))):
nzcols.append(col)
for formation in formations:
if (formation in w1formations) & (formation in w2formations):
w1f = w1df.loc[w1df['FormationClass'] == formation, nzcols]
w2f = w2df.loc[w2df['FormationClass'] == formation, nzcols]
w1 = np.array(w1f.values)
w2 = np.array(w2f.values)
dist_tmp, path_tmp = fastdtw.dtw(w1, w2, 2)
dist[well1idx,well2idx] += dist_tmp
distformation[well1idx,well2idx,formations.tolist().index(formation)] = dist_tmp
for pair in path_tmp:
idx1 = w1f.index[pair[0]]-w1df.index[0]
idx2 = w2f.index[pair[1]]-w2df.index[0]
path.append((idx1, idx2))
bounds[cum_pts[well1idx]] = np.max([bounds[cum_pts[well1idx]], path[0][1]])
bounds[cum_pts[well2idx]] = np.max([bounds[cum_pts[well2idx]], path[0][0]])
#NOTE delete
#np.save('path_%d_%d_fce.npy' % (well1idx, well2idx), path, allow_pickle = False)
pre_nz_rows = nz_rows
pre_nz_indices = nz_indices
added_1=-1
added_2=-1
for i in range(len(path)):
if ((path[i][0] != added_1) & (path[i][1] != added_2)):
if ((i > 0) & (i < len(path)-1)):
if (((path[i][0] != path[i-1][0]) & (path[i][1] != path[i+1][1])) | ((path[i][0] != path[i+1][0]) & (path[i][1] != path[i-1][1]))):
nz_indices = add_row(Adata, indices, indptr, b, nz_rows, nz_indices, i, path, cum_pts, [well1idx, well2idx])
nz_rows = nz_rows + 1
added_1 = path[i][0]
added_2 = path[i][1]
elif (i>0):
if ((path[i][0] != path[i-1][0]) & (path[i][1] != path[i-1][1])):
nz_indices = add_row(Adata, indices, indptr, b, nz_rows, nz_indices, i, path, cum_pts, [well1idx, well2idx])
nz_rows = nz_rows + 1
added_1 = path[i][0]
added_2 = path[i][1]
else:
if ((path[i][0] != path[i+1][0]) & (path[i][1] != path[i+1][1])):
nz_indices = add_row(Adata, indices, indptr, b, nz_rows, nz_indices, i, path, cum_pts, [well1idx, well2idx])
nz_rows = nz_rows + 1
added_1 = path[i][0]
added_2 = path[i][1]
num_matched_pairs = nz_rows - pre_nz_rows + 1
p = 2.0
weight = num_matched_pairs * (num_matched_pairs/dist[well1idx, well2idx])**(2.0/p)
weightsum = weightsum + weight
Adata[pre_nz_indices : nz_indices] = Adata[pre_nz_indices : nz_indices] * weight
Adata[:nz_indices] = Adata[:nz_indices] / weightsum
indptr[nz_rows] = nz_indices
indptr = indptr[:nz_rows+1]
np.save('dtw_dist_fce.npy', dist)
np.save('dtw_distformation_fce.npy', distformation)
return Adata, indices, indptr, b, bounds, nz_rows, nz_indices
def create_Ab(Adata, indices, indptr, b, nz_rows, nz_indices):
Adata = Adata[:nz_indices]
indices = indices[:nz_indices]
b = b[:nz_rows]
A = csr_matrix((Adata, indices, indptr), dtype=float)
return A, b, Adata, indices
def solve_Ax(A, b, bounds, data, wells, nwells, npts_in_well, cum_pts, reg_start_row, its=1):
res = lsq_linear(A,b,bounds=(bounds, 100.0*np.ones(len(data))),verbose=2,lsmr_tol='auto',max_iter=its)
wellnames = data['Well Name'].unique()
k = 0
for i, wellname in enumerate(wellnames):
wl = len(data.loc[data['Well Name'] == wellname])
rgt = np.cumsum(res.x[k:k+wl])
data.loc[data['Well Name'] == wellname, 'RGT'] = rgt
k = k+wl
def find_rgt(data, names, its):
wellnames = get_wellnames(data)
numwells = get_numwells(data)
npts_in_well, cum_pts = get_pts_per_well(data)
Adata, indices, indptr, b, bounds, dtw_rows, dtw_indices = build_Adtw(data, wellnames, numwells, npts_in_well, cum_pts, names)
A, b, Adata, indices = create_Ab(Adata, indices, indptr, b, dtw_rows, dtw_indices)
solve_Ax(A, b, bounds, data, wellnames, numwells, npts_in_well, cum_pts, dtw_rows, its)
# End of RGT functions
# Start of feature engineering functions
def find_dist(data):
wellnames = get_wellnames(data)
numwells = get_numwells(data)
dist = np.load('dtw_dist_fce.npy')
dist[pd.isnull(dist)]=0.0
distf = dist + dist.T
numpairs = int(numwells * (numwells-1) / 2)
A = np.zeros([numpairs, numwells], dtype=int)
b = np.zeros(numpairs)
row = 0
for well1idx in range(numwells-1):
for well2idx in range(well1idx+1, numwells):
A[row, well1idx] = 1
A[row, well2idx] = -1
b[row] = distf[well1idx, well2idx]
row += 1
dist = lsqr(A,b)
for well1idx in range(numwells):
set_well_value(data, wellnames[well1idx], 'X1D', dist[0][well1idx])
def interval_cols(intervals):
cols = []
for interval in intervals:
for metric in ['Depth','RGT']:
cols.append('%sFromPrev%sChange' % (metric, interval))
cols.append('%sToNext%sChange' % (metric, interval))
cols.append('%sToNearest%sChange' % (metric, interval))
cols.append('FracThrough%s%s' % (metric, interval))
cols.append('%sSize%s' % (interval, metric))
cols.append('Next%s' % interval)
cols.append('Prev%s' % interval)
cols.append('%sCompaction' % interval)
return cols
def interval_fe(data, intervals):
for interval in intervals:
for metric in ['Depth','RGT']:
df = data.groupby(['Well Name',interval],sort=False)[metric].min().reset_index()
df.columns = ['Well Name',interval,'%sPrev%sChange' % (metric, interval)]
data = pd.merge(data,df,how='left',on = ['Well Name',interval])
df = data.groupby(['Well Name',interval],sort=False)[metric].max().reset_index()
df.columns = ['Well Name',interval,'Max%sBefore%sChange' % (metric, interval)]
data = pd.merge(data,df,how='left',on = ['Well Name',interval])
# Set next change to be prev change of next interval. This will cause 'NaN' at the end of each well, so fill those with the max of the interval
df = data.groupby(['Well Name',interval],sort=False)['%sPrev%sChange' % (metric, interval)].first().reset_index()
df['%sNext%sChange' % (metric, interval)] = df['%sPrev%sChange' % (metric, interval)].shift(-1).reset_index(drop=True)
df.drop('%sPrev%sChange' % (metric, interval),axis=1,inplace=True)
df = df.groupby(['Well Name',interval],sort=False).first()
for wellname in df.index.levels[0]:
df.loc[wellname,df.loc[wellname].index[-1]] = np.nan
df = df.reset_index()
data = pd.merge(data,df,how='left',on = ['Well Name', interval])
data.loc[data['%sNext%sChange' % (metric, interval)].isnull(),'%sNext%sChange' % (metric, interval)] = data.loc[data['%sNext%sChange' % (metric, interval)].isnull(),'Max%sBefore%sChange' % (metric, interval)]
#IntervalSizeMetric
data['%sSize%s'%(interval,metric)] = data['%sNext%sChange'%(metric,interval)] - data['%sPrev%sChange'%(metric,interval)]
#MetricFromPrevIntervalChange
data['%sFromPrev%sChange' % (metric,interval)] = data[metric] - data['%sPrev%sChange' % (metric,interval)]
#MetricToNextIntervalChange
data['%sToNext%sChange' % (metric,interval)] = data['%sNext%sChange' % (metric,interval)] - data[metric]
#MetricToNearestIntervalChange
data['%sToNearest%sChange' % (metric,interval)] = data[['%sToNext%sChange' % (metric,interval), '%sFromPrev%sChange' % (metric,interval)]].min(axis=1)
#FracThroughMetricInterval
data['FracThrough%s%s' % (metric,interval)] = (data[metric] - data['%sPrev%sChange'%(metric,interval)]) / (data['%sSize%s'%(interval,metric)]+eps)
#Next/PrevInterval
intervalClass = data.groupby(['Well Name', interval],sort=False)[interval].first()
intervalClass.name = 'Shift%s' %interval
nextIntervalClass = intervalClass.shift(-1).reset_index()
prevIntervalClass = intervalClass.shift(1).reset_index()
nextIntervalClass.columns = ['Well Name',interval,'Next%s'%interval]
prevIntervalClass.columns = ['Well Name',interval,'Prev%s'%interval]
nextIntervalClass.loc[nextIntervalClass['Next%s'%interval].isnull(),'Next%s'%interval] = nextIntervalClass.loc[nextIntervalClass['Next%s'%interval].isnull(),interval]
prevIntervalClass.loc[prevIntervalClass['Prev%s'%interval].isnull(),'Prev%s'%interval] = prevIntervalClass.loc[prevIntervalClass['Prev%s'%interval].isnull(),interval]
data = pd.merge(data,nextIntervalClass,how='left',on = ['Well Name', interval])
data = pd.merge(data,prevIntervalClass,how='left',on = ['Well Name', interval])
#Compaction
data['%sCompaction'%interval] = data['%sSizeRGT'%interval] / (data['%sSizeDepth'%interval] + eps)
return data
def measurement_cols(ms):
cols = []
for m in ms:
cols.append('MedFilt%s' % m)
cols.append('Diff%s' % m)
cols.append('Diff2%s' % m)
cols.append('Sharp%s' % m)
return cols
def measurement_fe(data, ms):
dfg = data.groupby('Well Name')
for m in ms:
#MedFilt NOTE WINDOW CHOICE
for name,group in dfg[m]:
data.loc[data['Well Name']==name,'MedFilt%s'%m] = medfilt(group,15)
#Diff
for name,group in dfg[m]:
data.loc[data['Well Name']==name,'Diff%s'%m] = np.gradient(group)
#Diff2
for name,group in dfg['Diff%s'%m]:
data.loc[data['Well Name']==name,'Diff2%s'%m] = np.gradient(group)
#Sharp
data['Sharp%s' %m] = data[m] - data['Diff2%s' % m]
return data
def interval_measurement_cols(intervals, ms):
cols = []
for interval in intervals:
for m in ms:
cols.append('Mean%s%s' % (interval, m))
cols.append('DiffMean%s%s' % (interval, m))
if (interval != 'Local'):
cols.append('Std%s%s' % (interval, m))
cols.append('FracStd%s%s' % (interval, m))
return cols
def interval_measurement_fe(data, intervals, ms):
for interval in intervals:
for m in ms:
# Get dataframe group and rows
dfg = None
def rows(data, name):
return None
if (interval == 'Well') | (interval == 'Local'):
dfg = data.groupby('Well Name')
def rows(data, name):
return data['Well Name']==name
else:
dfg = data.groupby(['Well Name', interval])
def rows(data, name):
return (data['Well Name']==name[0]) & (data[interval]==name[1])
# Compute mean and standard deviation
if (interval != 'Local'):
#MeanInterval
for name,group in dfg[m]:
data.loc[rows(data, name),'Mean%s%s'% (interval, m)] = np.mean(group)
#StdInterval
for name,group in dfg[m]:
data.loc[rows(data, name),'Std%s%s'% (interval, m)] = np.std(group)
else:
#MeanLocal NOTE WINDOW CHOICE
gauss = gaussian(5,1)
gauss /= np.sum(gauss)
for name,group in dfg[m]:
data.loc[rows(data, name),'MeanLocal%s'%m] = np.convolve(group,gauss,'same')
#DiffMeanInterval
data['DiffMean%s%s'% (interval, m)] = data[m] - data['Mean%s%s'% (interval, m)]
#FracStdInterval
if (interval != 'Local'):
data['FracStd%s%s'% (interval, m)] = data['DiffMean%s%s'% (interval, m)] / (data['Std%s%s'% (interval, m)]+eps)
return data
def basic_feature_engineering(data):
cols = ['X1D', 'Formation3Depth', 'DepthFromSurf', 'WellFracMarine', 'FormationFracMarine', 'DepthFromSurf_divby_RGT', 'FormationSizeDepth_rel_av', 'FormationSizeRGT_rel_av', 'DiffRGT', 'IGR', 'VShaleClavier']
# Give unique values to each NM_M interval so they can be distinguished below
# Very hacky method for doing it...
nmclasssep = np.zeros(len(data['NM_MClass']))
nmclasssep[1:] = np.cumsum(np.abs(np.diff(data['NM_MClass'].values)))
nmclasssep[0] = nmclasssep[1]
data['NM_MClassSep'] = nmclasssep
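    # Worked example (illustrative): NM_MClass [0, 0, 1, 1, 0] gives
    # np.abs(np.diff(...)) = [0, 1, 0, 1] and cumsum = [0, 1, 1, 2], so after
    # copying the first element NM_MClassSep = [0, 0, 1, 1, 2]; each contiguous
    # NM_M run gets its own label.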
intervals = ['FormationClass', 'NM_MClassSep']
intervals_measurement = intervals + ['Well', 'Local']
cols += interval_cols(intervals)
ms=[u'GR', u'ILD_log10', u'DeltaPHI', u'PHIND', u'RELPOS']
cols += measurement_cols(ms)
cols += interval_measurement_cols(intervals_measurement, ms)
# X1D
find_dist(data)
# Formation3Depth
df = data.loc[data['FormationClass']==3].groupby(['Well Name'],sort=False)['Depth'].min().reset_index()
df.columns = ['Well Name','Formation3Depth']
data = pd.merge(data,df,how='left',on = 'Well Name')
# DepthFromSurf
df = data.groupby(['Well Name'],sort=False)['Depth'].min().reset_index()
df.columns = ['Well Name','SurfDepth']
data = pd.merge(data,df,how='left',on = ['Well Name'])
data['DepthFromSurf'] = data['Depth']-data['SurfDepth']
data.drop('SurfDepth',axis=1,inplace=True)
# WellFracMarine
df = data.groupby(['Well Name'],sort=False)['NM_MClass'].mean().reset_index()
df.columns = ['Well Name','WellFracMarine']
data = pd.merge(data,df,how='left',on = ['Well Name'])
# FormationFracMarine
df = data.groupby(['Well Name', 'FormationClass'],sort=False)['NM_MClass'].mean().reset_index()
df.columns = ['Well Name','FormationClass','FormationFracMarine']
data = pd.merge(data,df,how='left',on = ['Well Name', 'FormationClass'])
#DepthFromSurf_divby_RGT
data['DepthFromSurf_divby_RGT'] = data['DepthFromSurf']/data['RGT']
#DiffRGT
wellrgt = data.groupby(['Well Name'],sort=False)['RGT']
for name,group in wellrgt:
data.loc[data['Well Name']==name,'DiffRGT'] = np.gradient(group)
# Intervals
data = interval_fe(data, intervals)
# Remove useless columns
cols.remove('NextNM_MClassSep')
cols.remove('PrevNM_MClassSep')
# FormationSizeDepth_rel_av
mss=data.groupby(['Well Name','FormationClass'])['FormationClassSizeDepth'].first().reset_index().groupby('FormationClass').mean().values
data['FormationSizeDepth_rel_av']=data['FormationClassSizeDepth'].values/mss[data['FormationClass'].values.astype(int)].ravel()
# FormationSizeRGT_rel_av
mss=data.groupby(['Well Name','FormationClass'])['FormationClassSizeRGT'].first().reset_index().groupby('FormationClass').mean().values
data['FormationSizeRGT_rel_av']=data['FormationClassSizeRGT'].values/mss[data['FormationClass'].values.astype(int)].ravel()
#Measurements
data = measurement_fe(data, ms)
data = interval_measurement_fe(data, intervals_measurement, ms)
#IGR
data['IGR'] = (data['MedFiltGR']-data['MedFiltGR'].min())/(data['MedFiltGR'].max()-data['MedFiltGR'].min())
#VShaleClavier
    # Clavier (1971): Vsh = 1.7 - sqrt(3.38 - (IGR + 0.7)**2)
    data['VShaleClavier'] = 1.7 - np.sqrt(3.38 - (data['IGR']+0.7)**2)
return cols, data
def predict_pe_feature_engineering(data):
cols = []
intervals = ['Facies']
cols += interval_cols(intervals)
ms=[u'GR', u'ILD_log10', u'DeltaPHI', u'PHIND', u'RELPOS']
cols += interval_measurement_cols(intervals, ms)
data = interval_fe(data, intervals)
data = interval_measurement_fe(data, intervals, ms)
return cols, data
def predict_facies2_feature_engineering(data):
cols = []
intervals = ['FormationClass', 'NM_MClassSep', 'Well', 'Local']
ms=['PE']
cols += measurement_cols(ms)
cols += interval_measurement_cols(intervals, ms)
data = measurement_fe(data, ms)
data = interval_measurement_fe(data, intervals, ms)
return cols, data
def make_classifier(data, Xcols, Ycols, rows, clf):
clf.fit(data.loc[~rows, Xcols], data.loc[~rows, Ycols])
return clf
def classify(data, clf, Xcols, Ycols, rows):
data.loc[rows, Ycols] = clf.predict(data.loc[rows, Xcols])
def make_regressor(data, Xcols, Ycols, rows, reg):
reg.fit(data.loc[~rows, Xcols], data.loc[~rows, Ycols])
return reg
def regress(data, reg, Xcols, Ycols, rows):
data.loc[rows, Ycols] = reg.predict(data.loc[rows, Xcols])
#NOTE seeds
def run(solve_rgt=False):
# Load + preprocessing
odata = load_data()
if (solve_rgt):
data = load_data()
le = make_label_encoders(data, ['Formation', 'NM_M'])
apply_label_encoders(data, le)
scalers = make_scalers(data, ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'RELPOS', 'PE', 'FormationClass', u'NM_MClass', u'Facies'])
apply_scalers(data, scalers)
#NOTE Max its
find_rgt(data, [u'DeltaPHI', u'Facies', u'GR', u'ILD_log10', u'NM_MClass', u'PE', u'PHIND', u'RELPOS'], 1)
else:
data = pd.read_csv('dtw_out_fce_14000.csv')
data.drop(u'Unnamed: 0', axis=1, inplace=True)
# Reset Facies back to their unscaled values
data['Facies']=odata['Facies'].values
scalers = make_scalers(data, ['RGT'], stype='Standard')
apply_scalers(data, scalers)
neigh_interp(data)
cols = ['DeltaPHI', 'GR', 'ILD_log10', 'PHIND', 'RELPOS', 'FormationClass', 'NM_MClass', 'RGT', 'NeighbFacies']
basic_cols, data = basic_feature_engineering(data)
cols += basic_cols
seed1=0
seed2=0
seed3=0
facies_rows_to_predict = data['Facies'].isnull()
pe_rows_to_predict = data['PE'].isnull()
clf1 = XGBClassifier(base_score=0.5, colsample_bylevel=0.5, colsample_bytree=0.6, gamma=0.01, learning_rate=0.025, max_delta_step=0, max_depth=2, min_child_weight=7, missing=None, n_estimators=500, nthread=-1, objective='multi:softprob', reg_alpha=2, reg_lambda=20, scale_pos_weight=1, seed=seed1, silent=True, subsample=0.2)
clf2 = XGBClassifier(base_score=0.5, colsample_bylevel=0.3, colsample_bytree=0.8,
gamma=0.01, learning_rate=0.05, max_delta_step=0, max_depth=3,
min_child_weight=1, missing=None, n_estimators=500, nthread=-1,
objective='multi:softprob', reg_alpha=0, reg_lambda=1,
scale_pos_weight=1, seed=seed2, silent=True, subsample=0.5)
reg1 = XGBRegressor(base_score=0.5, colsample_bylevel=0.5, colsample_bytree=0.1,
gamma=0, learning_rate=0.05, max_delta_step=0, max_depth=1,
min_child_weight=10, missing=None, n_estimators=500, nthread=-1,
objective='reg:linear', reg_alpha=10, reg_lambda=10,
scale_pos_weight=1, seed=seed3, silent=True, subsample=0.1)
# Predict facies #1
Ycol = 'Facies'
Xcols = cols
clf = make_classifier(data, Xcols, Ycol, facies_rows_to_predict, clf1)
classify(data, clf, Xcols, Ycol, facies_rows_to_predict)
for wellname in get_wellnames(data):
wd = data.loc[data['Well Name'] == wellname, 'Facies']
wd = medfilt(wd, kernel_size=5)
data.loc[data['Well Name'] == wellname, 'Facies1'] = wd
cols += ['Facies1']
# Predict PE
predict_pe_cols, data = predict_pe_feature_engineering(data)
cols += predict_pe_cols
Ycol = 'PE'
Xcols = cols
reg = make_regressor(data, Xcols, Ycol, pe_rows_to_predict, reg1)
regress(data, reg, Xcols, Ycol, pe_rows_to_predict)
cols += ['PE']
# Predict facies #2
predict_facies2_cols, data = predict_facies2_feature_engineering(data)
cols += predict_facies2_cols
Ycol = 'Facies'
Xcols = cols
clf = make_classifier(data, Xcols, Ycol, facies_rows_to_predict, clf2)
classify(data, clf, Xcols, Ycol, facies_rows_to_predict)
for wellname in get_wellnames(data):
wd = data.loc[data['Well Name'] == wellname, 'Facies']
wd = medfilt(wd, kernel_size=7)
data.loc[data['Well Name'] == wellname, 'Facies'] = wd
data = data.loc[(data['Well Name'] == 'STUART') | (data['Well Name'] == 'CRAWFORD'),['Well Name','Depth','Facies']]
data.to_csv('ar4_submission3.csv')
if __name__ == "__main__":
run()
|
|
# vim: set et ai ts=4 sts=4 sw=4:
"""JKS/JCEKS file format decoder. Use in conjunction with PyOpenSSL
to translate to PEM, or load private key and certs directly into
openssl structs and wrap sockets.
Notes on Python2/3 compatibility:
Wherever possible, we rely on the 'natural' byte string
representation of each Python version, i.e. 'str' in Python2 and
'bytes' in Python3.
Python2.6+ aliases the 'bytes' type to 'str', so we can universally
write bytes(...) or b"" to get each version's natural byte string
representation.
The libraries we interact with are written to expect these natural
types in their respective Py2/Py3 versions, so this works well.
Things get slightly more complicated when we need to manipulate
individual bytes from a byte string. str[x] returns a 'str' in Python2
and an 'int' in Python3. You can't do 'int' operations on a 'str' and
vice-versa, so we need some form of common data type. We use
bytearray() for this purpose; in both Python2 and Python3, this will
return individual elements as an 'int'.
"""
from __future__ import print_function
import struct
import hashlib
import javaobj
import time
from pyasn1.codec.der import encoder
from pyasn1_modules import rfc5208, rfc2459
from pyasn1.type import univ, namedtype
from . import rfc2898
from . import sun_crypto
from .base import *
from .util import *
try:
from StringIO import StringIO as BytesIO # python 2
except ImportError:
from io import BytesIO # python3
MAGIC_NUMBER_JKS = b4.pack(0xFEEDFEED)
MAGIC_NUMBER_JCEKS = b4.pack(0xCECECECE)
SIGNATURE_WHITENING = b"Mighty Aphrodite"
class TrustedCertEntry(AbstractKeystoreEntry):
"""Represents a trusted certificate entry in a JKS or JCEKS keystore."""
def __init__(self, **kwargs):
super(TrustedCertEntry, self).__init__(**kwargs)
self.type = kwargs.get("type")
"""A string indicating the type of certificate. Unless in exotic applications, this is usually ``X.509``."""
self.cert = kwargs.get("cert")
"""A byte string containing the actual certificate data. In the case of X.509 certificates, this is the DER-encoded
X.509 representation of the certificate."""
@classmethod
def new(cls, alias, cert):
"""
Helper function to create a new TrustedCertEntry.
:param str alias: The alias for the Trusted Cert Entry
        :param bytes cert: The certificate, as a byte string.
:returns: A loaded :class:`TrustedCertEntry` instance, ready
to be placed in a keystore.
"""
timestamp = int(time.time()) * 1000
tke = cls(timestamp = timestamp,
alias = alias,
cert = cert,
type = "X.509")
return tke
def is_decrypted(self):
"""Always returns ``True`` for this entry type."""
return True
def decrypt(self, key_password):
"""Does nothing for this entry type; certificates are inherently public data and are not stored in encrypted form."""
return
def _encrypt_for(self, store_type, key_password):
"""Does nothing for this entry type; certificates are inherently public data and are not stored in encrypted form."""
return
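# Example (sketch): building a trusted certificate entry from DER-encoded
# certificate bytes obtained elsewhere; 'der_bytes' is a placeholder name, not
# defined in this module.
#   entry = TrustedCertEntry.new("my-ca", der_bytes)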
class PrivateKeyEntry(AbstractKeystoreEntry):
"""Represents a private key entry in a JKS or JCEKS keystore (e.g. an RSA or DSA private key)."""
def __init__(self, **kwargs):
super(PrivateKeyEntry, self).__init__(**kwargs)
self.cert_chain = kwargs.get("cert_chain")
"""
A list of tuples, representing the certificate chain associated with the private key. Each element of the list is a 2-tuple
containing the following data:
- ``[0]``: A string indicating the type of certificate. Unless in exotic applications, this is usually ``X.509``.
- ``[1]``: A byte string containing the actual certificate data. In the case of X.509 certificates, this is the DER-encoded X.509 representation of the certificate.
"""
self._encrypted = kwargs.get("encrypted")
self._pkey = kwargs.get("pkey")
self._pkey_pkcs8 = kwargs.get("pkey_pkcs8")
self._algorithm_oid = kwargs.get("algorithm_oid")
@classmethod
def new(cls, alias, certs, key, key_format='pkcs8'):
"""
Helper function to create a new PrivateKeyEntry.
:param str alias: The alias for the Private Key Entry
        :param list certs: A list of certificates, as byte strings.
The first one should be the one belonging to the private key,
the others the chain (in correct order).
:param str key: A byte string containing the private key in the
format specified in the key_format parameter (default pkcs8).
:param str key_format: The format of the provided private key.
Valid options are pkcs8 or rsa_raw. Defaults to pkcs8.
:returns: A loaded :class:`PrivateKeyEntry` instance, ready
to be placed in a keystore.
:raises UnsupportedKeyFormatException: If the key format is
unsupported.
"""
timestamp = int(time.time()) * 1000
cert_chain = []
for cert in certs:
cert_chain.append(('X.509', cert))
pke = cls(timestamp = timestamp,
alias = alias,
cert_chain = cert_chain)
if key_format == 'pkcs8':
try:
private_key_info = asn1_checked_decode(key, asn1Spec=rfc5208.PrivateKeyInfo())
pke._algorithm_oid = private_key_info['privateKeyAlgorithm']['algorithm'].asTuple()
pke.pkey = private_key_info['privateKey'].asOctets()
pke.pkey_pkcs8 = key
except PyAsn1Error as e:
raise BadKeyEncodingException("Failed to parse provided key as a PKCS#8 PrivateKeyInfo structure", e)
elif key_format == 'rsa_raw':
pke._algorithm_oid = RSA_ENCRYPTION_OID
# We must encode it to pkcs8
private_key_info = rfc5208.PrivateKeyInfo()
private_key_info.setComponentByName('version','v1')
a = rfc2459.AlgorithmIdentifier()
a.setComponentByName('algorithm', pke._algorithm_oid)
a.setComponentByName('parameters', univ.Any(encoder.encode(univ.Null())))
private_key_info.setComponentByName('privateKeyAlgorithm', a)
private_key_info.setComponentByName('privateKey', key)
pke.pkey_pkcs8 = encoder.encode(private_key_info)
pke.pkey = key
else:
raise UnsupportedKeyFormatException("Key Format '%s' is not supported" % key_format)
return pke
def __getattr__(self, name):
if not self.is_decrypted():
raise NotYetDecryptedException("Cannot access attribute '%s'; entry not yet decrypted, call decrypt() with the correct password first" % name)
return self.__dict__['_' + name]
def is_decrypted(self):
return (not self._encrypted)
def decrypt(self, key_password):
"""
Decrypts the entry using the given password. Has no effect if the entry has already been decrypted.
:param str key_password: The password to decrypt the entry with. If the entry was loaded from a JCEKS keystore,
the password must not contain any characters outside of the ASCII character set.
:raises DecryptionFailureException: If the entry could not be decrypted using the given password.
:raises UnexpectedAlgorithmException: If the entry was encrypted with an unknown or unexpected algorithm
:raise ValueError: If the entry was loaded from a JCEKS keystore and the password contains non-ASCII characters.
"""
if self.is_decrypted():
return
encrypted_info = None
try:
encrypted_info = asn1_checked_decode(self._encrypted, asn1Spec=rfc5208.EncryptedPrivateKeyInfo())
except PyAsn1Error as e:
raise DecryptionFailureException("Failed to decrypt data for private key '%s': %s" % (self.alias, str(e),), e)
algo_id = encrypted_info['encryptionAlgorithm']['algorithm'].asTuple()
algo_params = encrypted_info['encryptionAlgorithm']['parameters'].asOctets()
encrypted_private_key = encrypted_info['encryptedData'].asOctets()
plaintext = None
try:
if algo_id == sun_crypto.SUN_JKS_ALGO_ID:
plaintext = sun_crypto.jks_pkey_decrypt(encrypted_private_key, key_password)
elif algo_id == sun_crypto.SUN_JCE_ALGO_ID:
if self.store_type != "jceks":
raise UnexpectedAlgorithmException("Encountered JCEKS private key protection algorithm in JKS keystore")
# see RFC 2898, section A.3: PBES1 and definitions of AlgorithmIdentifier and PBEParameter
params = asn1_checked_decode(algo_params, asn1Spec=rfc2898.PBEParameter())
salt = params['salt'].asOctets()
iteration_count = int(params['iterationCount'])
plaintext = sun_crypto.jce_pbe_decrypt(encrypted_private_key, key_password, salt, iteration_count)
else:
raise UnexpectedAlgorithmException("Unknown %s private key protection algorithm: %s" % (self.store_type.upper(), algo_id))
except (BadHashCheckException, BadPaddingException):
raise DecryptionFailureException("Failed to decrypt data for private key '%s'; wrong password?" % self.alias)
# In JCEKS stores, the key protection scheme is password-based encryption with PKCS#5/7 padding, so wrong passwords have
        # a 1/256 chance of producing a 0x01 byte as the last byte and passing the padding check but producing garbage plaintext.
# The plaintext should be a DER-encoded PKCS#8 PrivateKeyInfo, so try to parse it as such; if that fails, then
# either the password was wrong and we hit a 1/256 case, or the password was right and the data is genuinely corrupt.
# In sane use cases the latter shouldn't happen, so let's assume the former.
private_key_info = None
try:
private_key_info = asn1_checked_decode(plaintext, asn1Spec=rfc5208.PrivateKeyInfo())
except PyAsn1Error as e:
raise DecryptionFailureException("Failed to decrypt data for private key '%s'; wrong password?" % (self.alias,))
key = private_key_info['privateKey'].asOctets()
algorithm_oid = private_key_info['privateKeyAlgorithm']['algorithm'].asTuple()
self._encrypted = None
self._pkey = key
self._pkey_pkcs8 = plaintext
self._algorithm_oid = algorithm_oid
def _encrypt_for(self, store_type, key_password):
"""
Encrypts the private key, so that it can be saved to a keystore.
This will make it necessary to decrypt it again if it is going to be used later.
Has no effect if the entry is already encrypted.
:param str key_password: The password to encrypt the entry with.
"""
if not self.is_decrypted():
return self._encrypted
ciphertext = None
a = rfc2459.AlgorithmIdentifier()
if store_type == "jks":
ciphertext = sun_crypto.jks_pkey_encrypt(self.pkey_pkcs8, key_password)
a.setComponentByName('algorithm', sun_crypto.SUN_JKS_ALGO_ID)
a.setComponentByName('parameters', univ.Any(encoder.encode(univ.Null())))
elif store_type == "jceks":
ciphertext, salt, iteration_count = sun_crypto.jce_pbe_encrypt(self.pkey_pkcs8, key_password)
pbe_params = rfc2898.PBEParameter()
pbe_params.setComponentByName('salt', salt)
pbe_params.setComponentByName('iterationCount', iteration_count)
a.setComponentByName('algorithm', sun_crypto.SUN_JCE_ALGO_ID)
a.setComponentByName('parameters', encoder.encode(pbe_params))
else:
raise UnsupportedKeystoreTypeException("Cannot encrypt entries of this type for storage in '%s' keystores; can only encrypt for JKS and JCEKS stores" % (store_type,))
epki = rfc5208.EncryptedPrivateKeyInfo()
epki.setComponentByName('encryptionAlgorithm', a)
epki.setComponentByName('encryptedData', ciphertext)
return encoder.encode(epki)
is_decrypted.__doc__ = AbstractKeystoreEntry.is_decrypted.__doc__
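# Example (sketch; 'pkcs8_der', 'leaf_der' and 'ca_der' are placeholder names):
# creating a private key entry from a PKCS#8-encoded key and its chain.
#   pke = PrivateKeyEntry.new("mykey", [leaf_der, ca_der], pkcs8_der)
# For a raw RSA key, pass key_format='rsa_raw' instead of the default 'pkcs8'.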
class SecretKeyEntry(AbstractKeystoreEntry):
"""Represents a secret (symmetric) key entry in a JCEKS keystore (e.g. an AES or DES key)."""
_classdesc_KR = None
_classdesc_KRT = None
_classdesc_SOFKP = None
_classdesc_SKS = None
def __init__(self, **kwargs):
super(SecretKeyEntry, self).__init__(**kwargs)
self._encrypted = kwargs.get("sealed_obj")
self._algorithm = kwargs.get("algorithm")
self._key = kwargs.get("key")
self._key_size = kwargs.get("key_size")
@classmethod
def new(cls, alias, algorithm, key):
"""
Helper function to create a new SecretKeyEntry.
:returns: A loaded :class:`SecretKeyEntry` instance, ready
to be placed in a keystore.
"""
timestamp = int(time.time()) * 1000
ske = cls(algorithm=algorithm, key=key, key_size=len(key)*8, timestamp=timestamp, alias=alias)
return ske
def __getattr__(self, name):
if not self.is_decrypted():
raise NotYetDecryptedException("Cannot access attribute '%s'; entry not yet decrypted, call decrypt() with the correct password first" % name)
return self.__dict__['_' + name]
def is_decrypted(self):
return (not self._encrypted)
def decrypt(self, key_password):
"""
Decrypts the entry using the given password. Has no effect if the entry has already been decrypted.
:param str key_password: The password to decrypt the entry with. Must not contain any characters outside
of the ASCII character set.
:raises DecryptionFailureException: If the entry could not be decrypted using the given password.
:raises UnexpectedAlgorithmException: If the entry was encrypted with an unknown or unexpected algorithm
:raise ValueError: If the password contains non-ASCII characters.
"""
if self.is_decrypted():
return
sealed_obj = self._encrypted
# convert between javaobj's internal byte[] representation and a Python bytes() object
encodedParams = java2bytes(sealed_obj.encodedParams)
encryptedContent = java2bytes(sealed_obj.encryptedContent)
plaintext = None
if sealed_obj.sealAlg == "PBEWithMD5AndTripleDES":
# if the object was sealed with PBEWithMD5AndTripleDES
# then the parameters should apply to the same algorithm
# and not be empty or null
if sealed_obj.paramsAlg != sealed_obj.sealAlg:
raise UnexpectedAlgorithmException("Unexpected parameters algorithm used in SealedObject; should match sealing algorithm '%s' but found '%s'" % (sealed_obj.sealAlg, sealed_obj.paramsAlg))
if encodedParams is None or len(encodedParams) == 0:
raise UnexpectedJavaTypeException("No parameters found in SealedObject instance for sealing algorithm '%s'; need at least a salt and iteration count to decrypt" % sealed_obj.sealAlg)
params_asn1 = asn1_checked_decode(encodedParams, asn1Spec=rfc2898.PBEParameter())
salt = params_asn1['salt'].asOctets()
iteration_count = int(params_asn1['iterationCount'])
try:
plaintext = sun_crypto.jce_pbe_decrypt(encryptedContent, key_password, salt, iteration_count)
except sun_crypto.BadPaddingException:
raise DecryptionFailureException("Failed to decrypt data for secret key '%s'; bad password?" % self.alias)
else:
raise UnexpectedAlgorithmException("Unexpected algorithm used for encrypting SealedObject: sealAlg=%s" % sealed_obj.sealAlg)
# The plaintext here is another serialized Java object; this
# time it's an object implementing the javax.crypto.SecretKey
# interface. When using the default SunJCE provider, these
# are usually either javax.crypto.spec.SecretKeySpec objects,
# or some other specialized ones like those found in the
# com.sun.crypto.provider package (e.g. DESKey and DESedeKey).
#
# Additionally, things are further complicated by the fact
# that some of these specialized SecretKey implementations
# (i.e. other than SecretKeySpec) implement a writeReplace()
# method, causing Java's serialization runtime to swap out the
# object for a completely different one at serialization time.
# Again for SunJCE, the substitute object that gets serialized
# is usually a java.security.KeyRep object.
obj = None
try:
obj, dummy = KeyStore._read_java_obj(plaintext, 0)
except Exception as e:
# at this point, either a wrong password was used that happened to decrypt into valid padding bytes, or the content of the entry is
# not a valid secret key.
raise DecryptionFailureException("Failed to decrypt data for secret key '%s'; wrong password or corrupted entry data?" % (self.alias,))
clazz = obj.get_class()
if clazz.name == "javax.crypto.spec.SecretKeySpec":
algorithm = obj.algorithm
key = java2bytes(obj.key)
key_size = len(key)*8
elif clazz.name == "java.security.KeyRep":
assert (obj.type.constant == "SECRET"), "Expected value 'SECRET' for KeyRep.type enum value, found '%s'" % obj.type.constant
key_bytes = java2bytes(obj.encoded)
key_encoding = obj.format
if key_encoding == "RAW":
pass # ok, no further processing needed
elif key_encoding == "X.509":
raise NotImplementedError("X.509 encoding for KeyRep objects not yet implemented")
elif key_encoding == "PKCS#8":
raise NotImplementedError("PKCS#8 encoding for KeyRep objects not yet implemented")
else:
raise UnexpectedKeyEncodingException("Unexpected key encoding '%s' found in serialized java.security.KeyRep object; expected one of 'RAW', 'X.509', 'PKCS#8'." % key_encoding)
algorithm = obj.algorithm
key = key_bytes
key_size = len(key)*8
else:
raise UnexpectedJavaTypeException("Unexpected object of type '%s' found inside SealedObject; don't know how to handle it" % clazz.name)
self._encrypted = None
self._algorithm = algorithm
self._key = key
self._key_size = key_size
is_decrypted.__doc__ = AbstractKeystoreEntry.is_decrypted.__doc__
def _encrypt_for(self, store_type, key_password):
if not self.is_decrypted():
return self._encrypted
# build a plaintext Java object to hold the key and some metadata
plaintext_obj = None
if "DES" in self.algorithm:
plaintext_obj = self._java_KeyRep(self.algorithm, self.key, "RAW", "SECRET")
else:
plaintext_obj = self._java_SecretKeySpec(self.algorithm, self.key)
plaintext = javaobj.dumps(plaintext_obj)
# now encrypt the serialized plaintext object, and store the result in a SealedObjectForKeyProtector object
ciphertext, salt, iteration_count = sun_crypto.jce_pbe_encrypt(plaintext, key_password)
params = rfc2898.PBEParameter()
params.setComponentByName('salt', salt)
params.setComponentByName('iterationCount', iteration_count)
params = encoder.encode(params)
sealed_obj = self._java_SealedObjectForKeyProtector(ciphertext, params, "PBEWithMD5AndTripleDES", "PBEWithMD5AndTripleDES")
return sealed_obj
@classmethod
def _java_SealedObjectForKeyProtector(cls, encryptedContent, encodedParams, paramsAlg, sealAlg):
"""
Constructs and returns a javaobj.JavaObject representation of a SealedObjectForKeyProtector object with the given parameters
:param bytes encryptedContent: The serialized underlying object, in encrypted format.
:param bytes encodedParams: The cryptographic parameters used by the sealing Cipher, encoded in the default format
:param str paramsAlg: Name of the encryption method (as known to Java) for which the parameters are valid.
:param str sealAlg: Name of the encryption method (as known to Java) that was used to encrypt the serialized underlying object.
"""
if not cls._classdesc_SOFKP:
classdesc_SO = javaobj.JavaClass()
classdesc_SO.name = "javax.crypto.SealedObject"
classdesc_SO.serialVersionUID = 4482838265551344752
classdesc_SO.flags = javaobj.JavaObjectConstants.SC_SERIALIZABLE
classdesc_SO.fields_names = ['encodedParams', 'encryptedContent', 'paramsAlg', 'sealAlg']
classdesc_SO.fields_types = ['[B', '[B', 'Ljava/lang/String;', 'Ljava/lang/String;']
cls._classdesc_SOFKP = javaobj.JavaClass()
cls._classdesc_SOFKP.name = "com.sun.crypto.provider.SealedObjectForKeyProtector"
cls._classdesc_SOFKP.serialVersionUID = -3650226485480866989
cls._classdesc_SOFKP.flags = javaobj.JavaObjectConstants.SC_SERIALIZABLE
cls._classdesc_SOFKP.superclass = classdesc_SO
obj = javaobj.JavaObject()
obj.classdesc = cls._classdesc_SOFKP
obj.encryptedContent = bytes2java(encryptedContent)
obj.encodedParams = bytes2java(encodedParams)
obj.paramsAlg = javaobj.JavaString(paramsAlg)
obj.sealAlg = javaobj.JavaString(sealAlg)
return obj
@classmethod
def _java_SecretKeySpec(cls, algorithm, key):
if not cls._classdesc_SKS:
cls._classdesc_SKS = javaobj.JavaClass()
cls._classdesc_SKS.name = "javax.crypto.spec.SecretKeySpec"
cls._classdesc_SKS.serialVersionUID = 6577238317307289933
cls._classdesc_SKS.flags = javaobj.JavaObjectConstants.SC_SERIALIZABLE
cls._classdesc_SKS.fields_names = ['algorithm', 'key']
cls._classdesc_SKS.fields_types = ['Ljava/lang/String;', '[B']
obj = javaobj.JavaObject()
obj.classdesc = cls._classdesc_SKS
obj.algorithm = javaobj.JavaString(algorithm)
obj.key = bytes2java(key)
return obj
@classmethod
def _java_KeyRep(cls, algorithm, encoded, xformat, xtype):
if not cls._classdesc_KRT:
classdesc_Enum = javaobj.JavaClass()
classdesc_Enum.name = "java.lang.Enum"
classdesc_Enum.serialVersionUID = 0
classdesc_Enum.flags = javaobj.JavaObjectConstants.SC_ENUM | javaobj.JavaObjectConstants.SC_SERIALIZABLE
cls._classdesc_KRT = javaobj.JavaClass()
cls._classdesc_KRT.name = "java.security.KeyRep$Type"
cls._classdesc_KRT.serialVersionUID = 0
cls._classdesc_KRT.flags = javaobj.JavaObjectConstants.SC_ENUM | javaobj.JavaObjectConstants.SC_SERIALIZABLE
cls._classdesc_KRT.superclass = classdesc_Enum
if not cls._classdesc_KR:
cls._classdesc_KR = javaobj.JavaClass()
cls._classdesc_KR.name = "java.security.KeyRep"
cls._classdesc_KR.serialVersionUID = -4757683898830641853
cls._classdesc_KR.flags = javaobj.JavaObjectConstants.SC_SERIALIZABLE
cls._classdesc_KR.fields_names = ['algorithm', 'encoded', 'format', 'type']
cls._classdesc_KR.fields_types = ['Ljava/lang/String;', '[B', 'Ljava/lang/String;', 'Ljava/security/KeyRep$Type;']
type_obj = javaobj.JavaEnum()
type_obj.classdesc = cls._classdesc_KRT
type_obj.constant = javaobj.JavaString(xtype)
obj = javaobj.JavaObject()
obj.classdesc = cls._classdesc_KR
obj.algorithm = javaobj.JavaString(algorithm)
obj.encoded = bytes2java(encoded)
obj.format = javaobj.JavaString(xformat)
obj.type = type_obj
return obj
# --------------------------------------------------------------------------
class KeyStore(AbstractKeystore):
"""
Represents a loaded JKS or JCEKS keystore.
"""
ENTRY_TYPE_PRIVATE_KEY = 1
ENTRY_TYPE_CERTIFICATE = 2
ENTRY_TYPE_SECRET_KEY = 3
@classmethod
def new(cls, store_type, store_entries):
"""
Helper function to create a new KeyStore.
:param string store_type: What kind of keystore
the store should be. Valid options are jks or jceks.
:param list store_entries: Existing entries that
should be added to the keystore.
:returns: A loaded :class:`KeyStore` instance,
with the specified entries.
:raises DuplicateAliasException: If some of the
entries have the same alias.
:raises UnsupportedKeyStoreTypeException: If the keystore is of
an unsupported type
:raises UnsupportedKeyStoreEntryTypeException: If some
of the keystore entries are unsupported (in this keystore type)
"""
if store_type not in ['jks', 'jceks']:
raise UnsupportedKeystoreTypeException("The Keystore Type '%s' is not supported" % store_type)
entries = {}
for entry in store_entries:
if not isinstance(entry, AbstractKeystoreEntry):
raise UnsupportedKeystoreEntryTypeException("Entries must be a KeyStore Entry")
if store_type != 'jceks' and isinstance(entry, SecretKeyEntry):
raise UnsupportedKeystoreEntryTypeException('Secret Key only allowed in JCEKS keystores')
alias = entry.alias
if alias in entries:
raise DuplicateAliasException("Found duplicate alias '%s'" % alias)
entries[alias] = entry
return cls(store_type, entries)
@classmethod
def loads(cls, data, store_password, try_decrypt_keys=True):
"""Loads the given keystore file using the supplied password for
verifying its integrity, and returns a :class:`KeyStore` instance.
Note that entries in the store that represent some form of
cryptographic key material are stored in encrypted form, and
therefore require decryption before becoming accessible.
Upon original creation of a key entry in a Java keystore,
users are presented with the choice to either use the same
password as the store password, or use a custom one. The most
common choice is to use the store password for the individual
key entries as well.
For ease of use in this typical scenario, this function will
attempt to decrypt each key entry it encounters with the store
password:
- If the key can be successfully decrypted with the store
password, the entry is returned in its decrypted form, and
its attributes are immediately accessible.
- If the key cannot be decrypted with the store password, the
entry is returned in its encrypted form, and requires a
manual follow-up decrypt(key_password) call from the user
before its individual attributes become accessible.
Setting ``try_decrypt_keys`` to ``False`` disables this automatic
decryption attempt, and returns all key entries in encrypted
form.
You can query whether a returned entry object has already been
decrypted by calling the :meth:`is_decrypted` method on it.
Attempting to access attributes of an entry that has not yet
been decrypted will result in a
:class:`~jks.util.NotYetDecryptedException`.
:param bytes data: Byte string representation of the keystore
to be loaded.
        :param str store_password: Keystore password string
:param bool try_decrypt_keys: Whether to automatically try to
decrypt any encountered key entries using the same password
as the keystore password.
:returns: A loaded :class:`KeyStore` instance, if the keystore
could be successfully parsed and the supplied store password
is correct.
If the ``try_decrypt_keys`` parameter was set to ``True``, any
keys that could be successfully decrypted using the store
          password have already been decrypted; otherwise, no attempt
to decrypt any key entries is made.
:raises BadKeystoreFormatException: If the keystore is malformed
in some way
:raises UnsupportedKeystoreVersionException: If the keystore
contains an unknown format version number
:raises KeystoreSignatureException: If the keystore signature
could not be verified using the supplied store password
:raises DuplicateAliasException: If the keystore contains
duplicate aliases
"""
store_type = ""
magic_number = data[:4]
if magic_number == MAGIC_NUMBER_JKS:
store_type = "jks"
elif magic_number == MAGIC_NUMBER_JCEKS:
store_type = "jceks"
else:
raise BadKeystoreFormatException('Not a JKS or JCEKS keystore'
' (magic number wrong; expected'
' FEEDFEED or CECECECE)')
try:
version = b4.unpack_from(data, 4)[0]
if version != 2:
tmpl = 'Unsupported keystore version; expected v2, found v%r'
raise UnsupportedKeystoreVersionException(tmpl % version)
entries = {}
entry_count = b4.unpack_from(data, 8)[0]
pos = 12
for i in range(entry_count):
tag = b4.unpack_from(data, pos)[0]; pos += 4
alias, pos = cls._read_utf(data, pos, kind="entry alias")
timestamp = int(b8.unpack_from(data, pos)[0]); pos += 8 # milliseconds since UNIX epoch
if tag == cls.ENTRY_TYPE_PRIVATE_KEY:
entry, pos = cls._read_private_key(data, pos, store_type)
elif tag == cls.ENTRY_TYPE_CERTIFICATE:
entry, pos = cls._read_trusted_cert(data, pos, store_type)
elif tag == cls.ENTRY_TYPE_SECRET_KEY:
if store_type != "jceks":
raise BadKeystoreFormatException("Unexpected entry tag {0} encountered in JKS keystore; only supported in JCEKS keystores".format(tag))
entry, pos = cls._read_secret_key(data, pos, store_type)
else:
raise BadKeystoreFormatException("Unexpected keystore entry tag %d", tag)
entry.alias = alias
entry.timestamp = timestamp
if try_decrypt_keys:
try:
entry.decrypt(store_password)
except (DecryptionFailureException, IllegalPasswordCharactersException):
# Note: IllegalPasswordCharactersException can happen here in the case of JCEKS keystores; JCEKS stores have the restriction that key passwords
# must be ASCII-only, but the store password can be anything it wants. So we might get IllegalPasswordCharactersException if the store password
# is non-ASCII and we try to decrypt a key with it.
pass # ok, let user call decrypt() manually
if alias in entries:
raise DuplicateAliasException("Found duplicate alias '%s'" % alias)
entries[alias] = entry
except struct.error as e:
raise BadKeystoreFormatException(e)
# check keystore integrity (uses UTF-16BE encoding of the password)
hash_fn = hashlib.sha1
hash_digest_size = hash_fn().digest_size
store_password_utf16 = store_password.encode('utf-16be')
expected_hash = hash_fn(store_password_utf16 + SIGNATURE_WHITENING + data[:pos]).digest()
found_hash = data[pos:pos+hash_digest_size]
if len(found_hash) != hash_digest_size:
tmpl = "Bad signature size; found %d bytes, expected %d bytes"
raise BadKeystoreFormatException(tmpl % (len(found_hash),
hash_digest_size))
if expected_hash != found_hash:
raise KeystoreSignatureException("Hash mismatch; incorrect keystore password?")
return cls(store_type, entries)
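    # Typical usage of loads() (sketch; the file name, alias and password are
    # placeholders):
    #   with open("keystore.jks", "rb") as f:
    #       ks = KeyStore.loads(f.read(), "storepass")
    #   pk = ks.private_keys["mykey"]   # already decrypted if the store password matched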
def saves(self, store_password, entry_passwords=None):
"""
Saves the keystore so that it can be read by other applications.
If any of the private keys are unencrypted, they will be encrypted
with the same password as the keystore.
        :param str store_password: Password for the created keystore
          (and for any unencrypted keys)
        :param dict entry_passwords: Optional mapping of entry alias to
          password, for entries that should be encrypted with a password
          other than the store password.
:returns: A byte string representation of the keystore.
:raises UnsupportedKeystoreTypeException: If the keystore
is of an unsupported type
:raises UnsupportedKeystoreEntryTypeException: If the keystore
contains an unsupported entry type
"""
entry_passwords = (entry_passwords or {})
if self.store_type == 'jks':
keystore = MAGIC_NUMBER_JKS
elif self.store_type == 'jceks':
keystore = MAGIC_NUMBER_JCEKS
else:
raise UnsupportedKeystoreTypeException("Only JKS and JCEKS keystores are supported")
keystore += b4.pack(2) # version 2
keystore += b4.pack(len(self.entries))
for alias, item in self.entries.items():
key_password = entry_passwords.get(alias, store_password)
if isinstance(item, TrustedCertEntry):
keystore += self._write_trusted_cert(alias, item)
elif isinstance(item, PrivateKeyEntry):
encrypted_form = item._encrypt_for(self.store_type, key_password)
keystore += self._write_private_key(alias, item, encrypted_form)
elif isinstance(item, SecretKeyEntry):
if self.store_type != 'jceks':
raise UnsupportedKeystoreEntryTypeException('Secret Key only allowed in JCEKS keystores')
encrypted_form = item._encrypt_for(self.store_type, key_password)
keystore += self._write_secret_key_entry(alias, item, encrypted_form)
else:
raise UnsupportedKeystoreEntryTypeException("Unknown entry type in keystore")
hash_fn = hashlib.sha1
store_password_utf16 = store_password.encode('utf-16be')
hash = hash_fn(store_password_utf16 + SIGNATURE_WHITENING + keystore).digest()
keystore += hash
return keystore
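    # Typical usage of saves() (sketch; 'der_bytes' and the passwords are
    # placeholders):
    #   ks = KeyStore.new("jks", [TrustedCertEntry.new("my-ca", der_bytes)])
    #   with open("out.jks", "wb") as f:
    #       f.write(ks.saves("storepass"))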
def __init__(self, store_type, entries):
super(KeyStore, self).__init__(store_type, entries)
@property
def certs(self):
"""A subset of the :attr:`entries` dictionary, filtered down to only
those entries of type :class:`TrustedCertEntry`."""
return dict([(a, e) for a, e in self.entries.items()
if isinstance(e, TrustedCertEntry)])
@property
def secret_keys(self):
"""A subset of the :attr:`entries` dictionary, filtered down to only
those entries of type :class:`SecretKeyEntry`."""
return dict([(a, e) for a, e in self.entries.items()
if isinstance(e, SecretKeyEntry)])
@property
def private_keys(self):
"""A subset of the :attr:`entries` dictionary, filtered down to only
those entries of type :class:`PrivateKeyEntry`."""
return dict([(a, e) for a, e in self.entries.items()
if isinstance(e, PrivateKeyEntry)])
@classmethod
def _read_trusted_cert(cls, data, pos, store_type):
cert_type, pos = cls._read_utf(data, pos, kind="certificate type")
cert_data, pos = cls._read_data(data, pos)
entry = TrustedCertEntry(type=cert_type, cert=cert_data, store_type=store_type)
return entry, pos
@classmethod
def _read_private_key(cls, data, pos, store_type):
ber_data, pos = cls._read_data(data, pos)
chain_len = b4.unpack_from(data, pos)[0]
pos += 4
cert_chain = []
for j in range(chain_len):
cert_type, pos = cls._read_utf(data, pos, kind="certificate type")
cert_data, pos = cls._read_data(data, pos)
cert_chain.append((cert_type, cert_data))
entry = PrivateKeyEntry(cert_chain=cert_chain, encrypted=ber_data, store_type=store_type)
return entry, pos
@classmethod
def _read_secret_key(cls, data, pos, store_type):
# SecretKeys are stored in the key store file through Java's
# serialization mechanism, i.e. as an actual serialized Java
# object embedded inside the file. The objects that get stored
# are not the SecretKey instances themselves though, as that
# would trivially expose the key without the need for a
# passphrase to gain access to it.
#
# Instead, an object of type javax.crypto.SealedObject is
# written. The purpose of this class is specifically to
# securely serialize objects that contain secret values by
# applying a password-based encryption scheme to the
# serialized form of the object to be protected. Only the
# resulting ciphertext is then stored by the serialized form
# of the SealedObject instance.
#
# To decrypt the SealedObject, the correct passphrase must be
# given to be able to decrypt the underlying object's
# serialized form. Once decrypted, one more de-serialization
# will result in the original object being restored.
#
# The default key protector used by the SunJCE provider
# returns an instance of type SealedObjectForKeyProtector, a
# (direct) subclass of SealedObject, which uses Java's
# custom/unpublished PBEWithMD5AndTripleDES algorithm.
#
# Class member structure:
#
# SealedObjectForKeyProtector:
# static final long serialVersionUID = -3650226485480866989L;
#
# SealedObject:
# static final long serialVersionUID = 4482838265551344752L;
# private byte[] encryptedContent; # The serialized underlying object, in encrypted format.
# private String sealAlg; # The algorithm that was used to seal this object.
# private String paramsAlg; # The algorithm of the parameters used.
# protected byte[] encodedParams; # The cryptographic parameters used by the sealing Cipher, encoded in the default format.
sealed_obj, pos = cls._read_java_obj(data, pos, ignore_remaining_data=True)
if not java_is_subclass(sealed_obj, "javax.crypto.SealedObject"):
raise UnexpectedJavaTypeException("Unexpected sealed object type '%s'; not a subclass of javax.crypto.SealedObject" % sealed_obj.get_class().name)
entry = SecretKeyEntry(sealed_obj=sealed_obj, store_type=store_type)
return entry, pos
@classmethod
def _read_java_obj(cls, data, pos, ignore_remaining_data=False):
data_stream = BytesIO(data[pos:])
obj = javaobj.load(data_stream, ignore_remaining_data=ignore_remaining_data)
obj_size = data_stream.tell()
return obj, pos + obj_size
@classmethod
def _write_private_key(cls, alias, item, encrypted_form):
private_key_entry = b4.pack(cls.ENTRY_TYPE_PRIVATE_KEY)
private_key_entry += cls._write_utf(alias)
private_key_entry += b8.pack(item.timestamp)
private_key_entry += cls._write_data(encrypted_form)
private_key_entry += b4.pack(len(item.cert_chain))
for cert in item.cert_chain:
private_key_entry += cls._write_utf(cert[0])
private_key_entry += cls._write_data(cert[1])
return private_key_entry
@classmethod
def _write_trusted_cert(cls, alias, item):
trusted_cert = b4.pack(cls.ENTRY_TYPE_CERTIFICATE)
trusted_cert += cls._write_utf(alias)
trusted_cert += b8.pack(item.timestamp)
trusted_cert += cls._write_utf('X.509')
trusted_cert += cls._write_data(item.cert)
return trusted_cert
@classmethod
def _write_secret_key_entry(cls, alias, item, encrypted_form):
result = b4.pack(cls.ENTRY_TYPE_SECRET_KEY)
result += cls._write_utf(alias)
result += b8.pack(item.timestamp)
result += javaobj.dumps(encrypted_form)
return result
|
|
# need documentation
import sys
if sys.version_info <= (3, 0):
print ("Sorry, this program requires Python 3.x, not Python 2.x\n")
sys.exit(1)
import numpy as np
"""
Problem Statement:
Classify a numeric series in groups.
Use case 1: Classify a natural number series into discrete groups.
    calc_groupby().calc_group_by(list_l, gc_i)
In:
    list_l = [1,2,4,3,6,38,33,89,86,87,99]
    gc_i = 4   # desired group count
    my_d = calc_groupby().calc_group_by(list_l, gc_i)
Out:
    {
        0: [1, 2, 3, 4, 6],
        1: [33, 38],
        2: [86, 87, 89],
        3: [99]
    }
Use case 2: Get the total number of possible groups for a natural number series.
    calc_groupby().calc_total_group_count(list_l)
In:
    list_l = [1,2,4,3,6,38,33,89,86,87,99]
    total_gc_i = calc_groupby().calc_total_group_count(list_l)
Out:
    7
"""
class calc_groupby:
# start: private methods
def __get_sorted_pair (self, list_l):
""" Return a sorted numpy array of nx2 dimensions
Parameters
----------
arg1: list
A list of natural numbers with at least 1 element
Returns
-------
array( [ [number1, number2] ] )
[(first element, next element), (next element, element after next ), ..., (last element, None)]
Example 1:
-------
__get_sorted_pair ([4,3,2,1,2])
array([ [1,2],[2,2],[2,3],[3,4],[4,None] ])
Example 2:
-------
__get_sorted_pair ([])
None
"""
try:
#convert list to a numpy array
a = np.array(list_l);
#sort array
sorted_list_a = np.sort(a);
            #get series of next elements; None for the last element
sorted_next_a = np.array([
(sorted_list_a[idx+1] if idx < len(sorted_list_a)-1 else None)
for idx,current in enumerate(sorted_list_a)
]);
#combine array of current and next elements, and return the result
sorted_pair_a = np.column_stack((sorted_list_a, sorted_next_a));
return sorted_pair_a;
except ValueError:
return None
def __get_diff_between_pairs (self, sorted_pair_a):
try:
current_a = sorted_pair_a[:, 0];#slice to get all current elements
next_a = sorted_pair_a[:, 1];#slice to get all next elements
if next_a[-1] is None and current_a[-1] is not None:
next_a[-1] = current_a[-1];
diff_a = next_a - current_a;
return diff_a;
except ValueError:
return None
def __get_groups (self, sorted_pair_a):
try:
diff_a = self.__get_diff_between_pairs(sorted_pair_a);
groups_a = np.unique(diff_a);
return groups_a, diff_a;
except ValueError:
return None
    def __addResultItem (self, dict_d, key, valueToAdd):
        # Append valueToAdd to the list stored under key, creating the list
        # if needed and skipping duplicates; returns the updated dictionary.
        try:
            list_l = dict_d.get(key) or []
            if valueToAdd not in list_l:
                list_l.append(valueToAdd)
            dict_d[key] = list_l
            return dict_d
        except ValueError:
            return None
# end: private methods
# start: public methods
def calc_total_group_count (self, list_l):
""" Return total number of possible groups a list of natural numbers can be divided into
Parameters
----------
arg1: list
A list of natural numbers with at least 1 element
Returns
-------
int
Total number of possible groups the list can be divided into
Raises
-------
ValueError
When list_l is empty
Example 1:
-------
>>> t.calc_total_group_count ([1,2,4,3,6,38,33,89,86,87,99])
7
Example 2: Invalid List
-------
>>> t.calc_total_group_count ([])
Traceback (most recent call last):
raise ValueError ("Parameter [list] must be a valid natural number list with 1 or more elements", list_l)
ValueError: ('Parameter [list] must be a valid natural number list with 1 or more elements', [])
"""
try:
#start: parameter validation check
if len(list_l) < 1:
raise ValueError ("Parameter [list] must be a valid natural number list with 1 or more elements", list_l)
#end: parameter validation check
sorted_pair_a = self.__get_sorted_pair (list_l);
groups_a, diff_a = self.__get_groups (sorted_pair_a);
return len(groups_a);
        except ValueError:
            raise
def calc_group_by (self, list_l, groupcount_i):
""" Return a dictionary containing group of numbers of a series
Parameters
----------
arg1: list
A list of natural numbers with at least 1 element
arg2: int
Desired number of groups to split the list into. It should be <= number of elements in the list
Returns
-------
dict { group_id, [series of numbers belonging to the group] }
Raises
-------
ValueError
When list_l is empty
or
When groupcount_i (Desired Group count) is larger than number of elements in the list_l
Example 1:
-------
>>> t.calc_group_by ([1,2,4,3,6,38,33,89,86,87,99], 4)
{0: [1, 2, 3, 4, 6], 1: [33, 38], 2: [86, 87, 89], 3: [99]}
Example 2: Invalid List
-------
>>> t.calc_group_by ([], 4)
Traceback (most recent call last):
raise ValueError ("Parameter [list] must be a valid natural number list with 1 or more elements", list_l)
ValueError: ('Parameter [list] must be a valid natural number list with 1 or more elements', [])
Example 3: Invalid Desired Group count
-------
>>> t.calc_group_by ([1,2], 4)
Traceback (most recent call last):
ValueError: ('Parameter [Desired Group Count] cannot be more than number of list elements', [1, 2], 4)
"""
try:
#start: parameter validation check
if len(list_l) < 1:
raise ValueError ("Parameter [list] must be a valid natural number list with 1 or more elements", list_l)
if len(list_l) < groupcount_i:
raise ValueError ('Parameter [Desired Group Count] cannot be more than number of list elements', list_l, groupcount_i)
#end: parameter validation check
sorted_pair_a = self.__get_sorted_pair (list_l);
groups_a, diff_a = self.__get_groups (sorted_pair_a);
total_group_count_i = len(groups_a);
current_a = sorted_pair_a[:, 0];#slice to get all current elements
next_a = sorted_pair_a[:, 1];#slice to get all next elements
desired_group_diff = groups_a[-groupcount_i];
groupId_i = 0;
result_d = {}
for i in range(0,len(current_a)-1):
current_item_i = current_a[i]
next_item_i = next_a[i]
diff_i = diff_a [i]
result_d = self.__addResultItem(result_d, groupId_i, current_item_i)
if diff_i > desired_group_diff:
groupId_i += 1
self.__addResultItem (result_d, groupId_i, next_item_i)
#print(i, current_item_i, next_item_i, diff_i, groupId_i, result_d);
return result_d;
        except ValueError:
            raise
# end: public methods
# end of class
if __name__ == "__main__":
import doctest;
doctest.testmod(extraglobs= {'t': calc_groupby()});
|
|
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""module to provide updating installing process function.
.. moduleauthor:: Xiaodong Wang <[email protected]>
"""
import logging
from compass.log_analyzor.adapter_matcher import AdapterItemMatcher
from compass.log_analyzor.adapter_matcher import AdapterMatcher
from compass.log_analyzor.adapter_matcher import OSMatcher
from compass.log_analyzor.adapter_matcher import PackageMatcher
from compass.log_analyzor.file_matcher import FileMatcher
from compass.log_analyzor.line_matcher import IncrementalProgress
from compass.log_analyzor.line_matcher import LineMatcher
# TODO(weidong): reconsider initialization method for the following.
OS_INSTALLER_CONFIGURATIONS = {
'Ubuntu': AdapterItemMatcher(
file_matchers=[
FileMatcher(
filename='syslog',
min_progress=0.0,
max_progress=1.0,
line_matchers={
'start': LineMatcher(
pattern=r'.*',
progress=.05,
message_template='start installing',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='ethdetect'
),
'ethdetect': LineMatcher(
pattern=r'Menu.*item.*\'ethdetect\'.*selected',
progress=.1,
message_template='ethdetect selected',
unmatch_nextline_next_matcher_name='ethdetect',
match_nextline_next_matcher_name='netcfg'
),
'netcfg': LineMatcher(
pattern=r'Menu.*item.*\'netcfg\'.*selected',
progress=.12,
message_template='netcfg selected',
unmatch_nextline_next_matcher_name='netcfg',
match_nextline_next_matcher_name='network-preseed'
),
'network-preseed': LineMatcher(
pattern=r'Menu.*item.*\'network-preseed\'.*selected',
progress=.15,
message_template='network-preseed selected',
unmatch_nextline_next_matcher_name='network-preseed',
match_nextline_next_matcher_name='localechooser'
),
                    'localechooser': LineMatcher(
pattern=r'Menu.*item.*\'localechooser\'.*selected',
progress=.18,
message_template='localechooser selected',
unmatch_nextline_next_matcher_name='localechooser',
match_nextline_next_matcher_name='download-installer'
),
'download-installer': LineMatcher(
pattern=(
r'Menu.*item.*\'download-installer\'.*selected'
),
progress=.2,
message_template='download installer selected',
unmatch_nextline_next_matcher_name=(
'download-installer'),
match_nextline_next_matcher_name='clock-setup'
),
'clock-setup': LineMatcher(
pattern=r'Menu.*item.*\'clock-setup\'.*selected',
progress=.3,
message_template='clock-setup selected',
unmatch_nextline_next_matcher_name='clock-setup',
match_nextline_next_matcher_name='disk-detect'
),
'disk-detect': LineMatcher(
pattern=r'Menu.*item.*\'disk-detect\'.*selected',
progress=.32,
message_template='disk-detect selected',
unmatch_nextline_next_matcher_name='disk-detect',
match_nextline_next_matcher_name='partman-base'
),
'partman-base': LineMatcher(
pattern=r'Menu.*item.*\'partman-base\'.*selected',
progress=.35,
message_template='partman-base selected',
unmatch_nextline_next_matcher_name='partman-base',
match_nextline_next_matcher_name='live-installer'
),
'live-installer': LineMatcher(
pattern=r'Menu.*item.*\'live-installer\'.*selected',
progress=.45,
message_template='live-installer selected',
unmatch_nextline_next_matcher_name='live-installer',
match_nextline_next_matcher_name='pkgsel'
),
'pkgsel': LineMatcher(
pattern=r'Menu.*item.*\'pkgsel\'.*selected',
progress=.5,
message_template='pkgsel selected',
unmatch_nextline_next_matcher_name='pkgsel',
match_nextline_next_matcher_name='grub-installer'
),
'grub-installer': LineMatcher(
pattern=r'Menu.*item.*\'grub-installer\'.*selected',
progress=.9,
message_template='grub-installer selected',
unmatch_nextline_next_matcher_name='grub-installer',
match_nextline_next_matcher_name='finish-install'
),
'finish-install': LineMatcher(
pattern=r'Menu.*item.*\'finish-install\'.*selected',
progress=.95,
message_template='finish-install selected',
unmatch_nextline_next_matcher_name='finish-install',
match_nextline_next_matcher_name='finish-install-done'
),
'finish-install-done': LineMatcher(
pattern=r'Running.*finish-install.d/.*save-logs',
progress=1.0,
message_template='finish-install is done',
unmatch_nextline_next_matcher_name=(
'finish-install-done'
),
match_nextline_next_matcher_name='exit'
),
}
),
FileMatcher(
filename='status',
min_progress=.2,
max_progress=.3,
line_matchers={
'start': LineMatcher(
pattern=r'Package: (?P<package>.*)',
progress=IncrementalProgress(0.0, 0.99, 0.05),
message_template='Installing udeb %(package)s',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='start'
)
}
),
FileMatcher(
filename='initial-status',
min_progress=.5,
max_progress=.9,
line_matchers={
'start': LineMatcher(
pattern=r'Package: (?P<package>.*)',
progress=IncrementalProgress(0.0, 0.99, 0.01),
message_template='Installing deb %(package)s',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='start'
)
}
),
]
),
'CentOS': AdapterItemMatcher(
file_matchers=[
FileMatcher(
filename='sys.log',
min_progress=0.0,
max_progress=0.1,
line_matchers={
'start': LineMatcher(
pattern=r'NOTICE (?P<message>.*)',
progress=IncrementalProgress(.1, .9, .1),
message_template='%(message)s',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='exit'
),
}
),
FileMatcher(
filename='anaconda.log',
min_progress=0.1,
max_progress=1.0,
line_matchers={
'start': LineMatcher(
pattern=r'setting.*up.*kickstart',
progress=.1,
message_template=(
'Setting up kickstart configurations'),
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='STEP_STAGE2'
),
'STEP_STAGE2': LineMatcher(
pattern=r'starting.*STEP_STAGE2',
progress=.15,
message_template=(
'Downloading installation '
'images from server'),
unmatch_nextline_next_matcher_name='STEP_STAGE2',
match_nextline_next_matcher_name='start_anaconda'
),
'start_anaconda': LineMatcher(
pattern=r'Running.*anaconda.*script',
progress=.2,
unmatch_nextline_next_matcher_name=(
'start_anaconda'),
match_nextline_next_matcher_name=(
'start_kickstart_pre')
),
'start_kickstart_pre': LineMatcher(
pattern=r'Running.*kickstart.*pre.*script',
progress=.25,
unmatch_nextline_next_matcher_name=(
'start_kickstart_pre'),
match_nextline_next_matcher_name=(
'kickstart_pre_done')
),
'kickstart_pre_done': LineMatcher(
pattern=(
r'All.*kickstart.*pre.*script.*have.*been.*run'),
progress=.3,
unmatch_nextline_next_matcher_name=(
'kickstart_pre_done'),
match_nextline_next_matcher_name=(
'start_enablefilesystem')
),
'start_enablefilesystem': LineMatcher(
pattern=r'moving.*step.*enablefilesystems',
progress=0.3,
message_template=(
'Performing hard-disk partitioning and '
'enabling filesystems'),
unmatch_nextline_next_matcher_name=(
'start_enablefilesystem'),
match_nextline_next_matcher_name=(
'enablefilesystem_done')
),
'enablefilesystem_done': LineMatcher(
pattern=r'leaving.*step.*enablefilesystems',
progress=.35,
message_template='Filesystems are enabled',
unmatch_nextline_next_matcher_name=(
'enablefilesystem_done'),
match_nextline_next_matcher_name=(
'setup_repositories')
),
'setup_repositories': LineMatcher(
pattern=r'moving.*step.*reposetup',
progress=0.35,
message_template=(
'Setting up Customized Repositories'),
unmatch_nextline_next_matcher_name=(
'setup_repositories'),
match_nextline_next_matcher_name=(
'repositories_ready')
),
'repositories_ready': LineMatcher(
pattern=r'leaving.*step.*reposetup',
progress=0.4,
message_template=(
                            'Customized Repositories setup is done'),
unmatch_nextline_next_matcher_name=(
'repositories_ready'),
match_nextline_next_matcher_name='checking_dud'
),
'checking_dud': LineMatcher(
pattern=r'moving.*step.*postselection',
progress=0.4,
message_template='Checking DUD modules',
unmatch_nextline_next_matcher_name='checking_dud',
match_nextline_next_matcher_name='dud_checked'
),
'dud_checked': LineMatcher(
pattern=r'leaving.*step.*postselection',
progress=0.5,
                        message_template='Checking DUD modules is done',
unmatch_nextline_next_matcher_name='dud_checked',
match_nextline_next_matcher_name='installing_packages'
),
'installing_packages': LineMatcher(
pattern=r'moving.*step.*installpackages',
progress=0.5,
message_template='Installing packages',
unmatch_nextline_next_matcher_name=(
'installing_packages'),
match_nextline_next_matcher_name=(
'packages_installed')
),
'packages_installed': LineMatcher(
pattern=r'leaving.*step.*installpackages',
progress=0.8,
message_template='Packages are installed',
unmatch_nextline_next_matcher_name=(
'packages_installed'),
match_nextline_next_matcher_name=(
'installing_bootloader')
),
'installing_bootloader': LineMatcher(
pattern=r'moving.*step.*instbootloader',
progress=0.9,
message_template='Installing bootloaders',
unmatch_nextline_next_matcher_name=(
'installing_bootloader'),
match_nextline_next_matcher_name=(
'bootloader_installed'),
),
'bootloader_installed': LineMatcher(
pattern=r'leaving.*step.*instbootloader',
progress=1.0,
                        message_template='bootloaders are installed',
unmatch_nextline_next_matcher_name=(
'bootloader_installed'),
match_nextline_next_matcher_name='exit'
),
}
),
FileMatcher(
filename='install.log',
min_progress=0.56,
max_progress=0.80,
line_matchers={
'start': LineMatcher(
pattern=r'Installing (?P<package>.*)',
progress=IncrementalProgress(0.0, 0.99, 0.005),
message_template='Installing %(package)s',
unmatch_sameline_next_matcher_name='package_complete',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='start'
),
'package_complete': LineMatcher(
pattern='FINISHED.*INSTALLING.*PACKAGES',
progress=1.0,
message_template='installing packages finished',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='exit'
),
}
),
]
),
}
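# Note on the structure above (added summary): each key in `line_matchers`
# names a state; when a log line matches `pattern`, progress and message are
# recorded and the matcher named by `match_nextline_next_matcher_name` handles
# the next line, otherwise the one named by
# `unmatch_nextline_next_matcher_name` does. A minimal, hypothetical state
# would look like:
#   'example': LineMatcher(
#       pattern=r'something.*finished',
#       progress=1.0,
#       message_template='something finished',
#       unmatch_nextline_next_matcher_name='example',
#       match_nextline_next_matcher_name='exit'
#   ),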
PACKAGE_INSTALLER_CONFIGURATIONS = {
'openstack': AdapterItemMatcher(
file_matchers=[
FileMatcher(
filename='chef-client.log',
min_progress=0.1,
max_progress=1.0,
line_matchers={
'start': LineMatcher(
pattern=(
r'Processing\s*(?P<install_type>.*)'
r'\[(?P<package>.*)\].*'),
progress=IncrementalProgress(0.0, .90, 0.005),
message_template=(
'Processing %(install_type)s %(package)s'),
unmatch_sameline_next_matcher_name=(
'chef_complete'),
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='start'
),
'chef_complete': LineMatcher(
pattern=r'Chef.*Run.*complete',
progress=1.0,
message_template='Chef run complete',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='exit'
),
}
),
]
),
}
ADAPTER_CONFIGURATIONS = [
AdapterMatcher(
os_matcher=OSMatcher(
os_installer_name='cobbler',
os_pattern='CentOS.*',
item_matcher=OS_INSTALLER_CONFIGURATIONS['CentOS'],
min_progress=0.0,
max_progress=0.6),
package_matcher=PackageMatcher(
package_installer_name='chef',
target_system='openstack',
item_matcher=PACKAGE_INSTALLER_CONFIGURATIONS['openstack'],
min_progress=0.6,
max_progress=1.0)
),
AdapterMatcher(
os_matcher=OSMatcher(
os_installer_name='cobbler',
os_pattern='Ubuntu.*',
item_matcher=OS_INSTALLER_CONFIGURATIONS['Ubuntu'],
min_progress=0.0,
max_progress=0.6),
package_matcher=PackageMatcher(
package_installer_name='chef',
target_system='openstack',
item_matcher=PACKAGE_INSTALLER_CONFIGURATIONS['openstack'],
min_progress=0.6,
max_progress=1.0)
),
]
def _get_adapter_matcher(
os_installer, os_name,
package_installer, target_system
):
"""Get adapter matcher by os name and package installer name."""
for configuration in ADAPTER_CONFIGURATIONS:
if configuration.match(os_installer, os_name,
package_installer, target_system):
return configuration
else:
logging.debug('configuration %s does not match %s and %s',
configuration, os_name, target_system)
logging.error('No configuration found with os installer %s os %s '
'package_installer %s, target_system %s',
os_installer, os_name, package_installer, target_system)
return None
def update_progress(os_installer, os_names, package_installer, target_systems,
cluster_hosts):
"""Update adapter installing progress.
:param os_installer: os installer name
:param package_installer: package installer name.
:param cluster_hosts: clusters and hosts in each cluster to update.
:param cluster_hosts: dict of int to list of int.
"""
for clusterid, hostids in cluster_hosts.items():
adapter = _get_adapter_matcher(os_installer, os_names[clusterid],
package_installer,
target_systems[clusterid])
if not adapter:
continue
adapter.update_progress(clusterid, hostids)
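# Example call (sketch; the cluster/host ids and names are placeholders):
#   update_progress('cobbler', {1: 'CentOS6.5'}, 'chef', {1: 'openstack'},
#                   {1: [1, 2, 3]})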
|
|
"""Spark schemas for both raw corpus data (from various sources) and the model
which aforementioned raw corpus data converges.
Corpus data comes from many sources which each have their own format(s). All of
these raw corpus data models, e.g., :class:`BenningtonDocument`, map to, or
rather compose, a :class:`Document`, which is both the Unified Data Model of
these sources, as well as the canonical set of OSP documents that gets used by
other data extraction jobs.
"""
import uuid
import tldextract
import os
from pyspark.sql import types as T
from urllib.parse import urlparse
from osp_pipeline.core.utils import requires_attr
from osp_pipeline.core.models import Model
from osp_text_extractor import bytes_to_text
from osp_pipeline.corpus import sources
from osp_pipeline.corpus.utils import get_md5
class V1CorpusDocument(Model):
"""Documents from the v1 OSP corpus (circa 2014).
"""
s3_key = 'v1_corpus_documents'
rows_per_output_partition = 1000
schema = T.StructType([
T.StructField('checksum', T.StringType(), nullable=False),
T.StructField('url', T.StringType()),
T.StructField('provenance', T.StringType()),
T.StructField('retrieved', T.TimestampType()),
T.StructField('data', T.BinaryType()),
T.StructField('data_md5', T.StringType()),
])
@classmethod
def from_source(cls, log_data, doc_data):
"""Build a row from a log + doc data.
Args:
log_data (BytesIO): Raw log data.
doc_data (BytesIO): Raw document data.
Returns: cls
"""
log = sources.SourceV1CorpusLog(log_data)
data = bytearray(doc_data)
return cls(
checksum=log.checksum(),
url=log.url(),
provenance=log.provenance(),
retrieved=log.retrieved(),
data=data,
data_md5=get_md5(data),
)
class V2CorpusWARC(Model):
"""Documents downloaded by OSP scrapers (2016-present), stored as WARCs.
"""
s3_key = 'v2_corpus_warcs'
rows_per_output_partition = None
schema = T.StructType([
T.StructField('id', T.StringType(), nullable=False),
T.StructField('url', T.StringType()),
T.StructField('source_url', T.StringType()),
T.StructField('source_anchor', T.StringType()),
T.StructField('retrieved', T.TimestampType()),
T.StructField('spider_name', T.StringType()),
T.StructField('spider_run_id', T.StringType()),
T.StructField('depth', T.IntegerType()),
T.StructField('hops_from_seed', T.IntegerType()),
T.StructField('status', T.IntegerType()),
T.StructField('data', T.BinaryType()),
T.StructField('data_md5', T.StringType()),
])
@classmethod
def from_source(cls, data):
"""Build a row from raw WARC data.
Args:
data (BytesIO): Raw WARC data.
Returns: cls
"""
warc = sources.SourceV2CorpusWARC(data)
doc_data = warc.doc_data()
# The `warc.doc_data()` function can return `None`, so guard against
# it.
if doc_data is None:
md5 = None
else:
md5 = get_md5(doc_data)
return cls(
id=warc.record_id(),
url=warc.url(),
source_url=warc.source_url(),
source_anchor=warc.source_anchor(),
retrieved=warc.retrieved(),
spider_name=warc.spider_name(),
spider_run_id=warc.spider_run_id(),
depth=warc.depth(),
hops_from_seed=warc.hops_from_seed(),
status=warc.status(),
data=doc_data,
data_md5=md5,
)
class BenningtonDocument(Model):
"""Documents from the Bennington College syllabus repository.
"""
s3_key = 'bennington_documents'
rows_per_output_partition = 1000
schema = T.StructType([
T.StructField('path', T.StringType(), nullable=False),
T.StructField('semester', T.StringType()),
T.StructField('year', T.IntegerType()),
T.StructField('department_code', T.StringType()),
T.StructField('course_number', T.IntegerType()),
T.StructField('data', T.BinaryType()),
T.StructField('data_md5', T.StringType()),
])
@classmethod
def from_source(cls, path, data):
"""Build a row from a path and document data.
Args:
path (str): Relative document path.
data (bytes): Raw document data.
Returns: cls
"""
path = sources.SourceBenningtonPath(path)
data = bytearray(data)
return cls(
path=path.path,
semester=path.semester(),
year=path.year(),
department_code=path.department_code(),
course_number=path.course_number(),
data=data,
data_md5=get_md5(data),
)
class ManualDocument(Model):
"""Manually uploaded docs from a variety of sources, identified by MD5.
For this model, `data_md5` serves as both the md5sum of the data and
the unique identifier. The filename of each file this model
processes should be the md5sum of that file. Each filename is read
and stored in `data_md5` directly and not recomputed.
For help preparing documents in a way that fits this specification,
see the 'bin/upload_manual_docs.py' script.
"""
s3_key = 'manual_documents'
rows_per_output_partition = 1000
schema = T.StructType([
T.StructField('data_md5', T.StringType(), nullable=False),
T.StructField('data', T.BinaryType()),
])
@classmethod
def from_source(cls, filename, data):
"""Build a row from a path and document data.
Args:
filename (str): Filename of document.
data (bytes): Raw document data.
Returns: cls
"""
data = bytearray(data)
return cls(
data_md5=filename,
data=data,
)
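# Illustrative sketch (an assumption, not the actual bin/upload_manual_docs.py
# helper): copy a local file to a sibling file named after the md5sum of its
# contents, matching the filename convention that ManualDocument expects.
def _example_prepare_manual_document(path):
    import hashlib
    import shutil
    with open(path, 'rb') as infile:
        digest = hashlib.md5(infile.read()).hexdigest()
    target = os.path.join(os.path.dirname(path), digest)
    shutil.copyfile(path, target)
    return target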
class Document(Model):
"""Unified data model (UDM) for the various raw corpus sources.
"""
s3_key = 'documents'
rows_per_output_partition = None
schema = T.StructType([
T.StructField('id', T.LongType(), nullable=False),
T.StructField('corpus', T.StringType(), nullable=False),
T.StructField('corpus_id', T.StringType(), nullable=False),
T.StructField('url', T.StringType()),
T.StructField('source_url', T.StringType()),
T.StructField('source_anchor', T.StringType()),
T.StructField('retrieved', T.TimestampType()),
T.StructField('data', T.BinaryType()),
T.StructField('data_md5', T.StringType()),
])
@requires_attr('url')
def url_domain(self):
"""Extract the registered domain from the URL.
Returns: str or None
"""
return tldextract.extract(self.url).registered_domain
@requires_attr('source_url')
def source_url_domain(self):
"""Extract the registered domain from the source URL.
Returns: str or None
"""
return tldextract.extract(self.source_url).registered_domain
@requires_attr('url')
def url_path_prefix(self):
"""Get the path prefix of the URL. Eg:
http://yale.edu/english/syllabi/doc.pdf -> /english/syllabi
Returns: str or None
"""
parsed = urlparse(self.url)
return os.path.dirname(parsed.path)
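    # Illustrative expectations (hypothetical URL, mirroring the docstrings
    # above):
    #
    #     doc = Document(url='http://yale.edu/english/syllabi/doc.pdf')
    #     doc.url_domain()       # -> 'yale.edu'
    #     doc.url_path_prefix()  # -> '/english/syllabi'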
# TODO: Add method for getting `source_anchor` info?
class DocumentText(Model):
rows_per_output_partition = None
s3_key = 'document_texts'
schema = T.StructType([
T.StructField('document_id', T.StringType(), nullable=False),
T.StructField('mime_type', T.StringType()),
T.StructField('text', T.StringType()),
T.StructField('text_md5', T.StringType()),
])
|
|
#!/usr/bin/env python
"""HTTP API logic that ties API call handlers with HTTP routes."""
import itertools
import json
import time
import traceback
import urllib2
from werkzeug import exceptions as werkzeug_exceptions
from werkzeug import routing
from werkzeug import wrappers as werkzeug_wrappers
from google.protobuf import json_format
from google.protobuf import symbol_database
import logging
from grr.gui import api_auth_manager
from grr.gui import api_call_handler_base
from grr.gui import api_call_router
from grr.gui import api_value_renderers
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.lib import utils
from grr.lib.aff4_objects import users as aff4_users
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import api_pb2
class Error(Exception):
pass
class PostRequestParsingError(Error):
pass
class UnsupportedHttpMethod(Error):
pass
class AdditionalArgsProcessingError(Error):
pass
class UnexpectedResultTypeError(Error):
pass
class ApiCallRouterNotFoundError(Error):
pass
class RouterMatcher(object):
"""Matches requests to routers (and caches them)."""
def __init__(self):
self._routing_maps_cache = utils.FastStore()
def _BuildHttpRoutingMap(self, router_cls):
"""Builds a werkzeug routing map out of a given router class."""
if not issubclass(router_cls, api_call_router.ApiCallRouter):
raise ValueError("Router has to be an instance of ApiCallRouter.")
routing_map = routing.Map()
# Note: we traverse methods of the base class (ApiCallRouter) to avoid
# potential problems caused by child router classes using the @Http
# annotation (thus adding additional unforeseen HTTP paths/methods). We
# don't want the HTTP API to depend on a particular router implementation.
for _, metadata in router_cls.GetAnnotatedMethods().items():
for http_method, path, unused_options in metadata.http_methods:
routing_map.add(
routing.Rule(path, methods=[http_method], endpoint=metadata))
# This adds support for the next version of the API that uses
        # standardized JSON protobuf serialization.
routing_map.add(
routing.Rule(
path.replace("/api/", "/api/v2/"),
methods=[http_method],
endpoint=metadata))
return routing_map
def _GetRoutingMap(self, router):
"""Returns a routing map for a given router instance."""
try:
routing_map = self._routing_maps_cache.Get(router.__class__)
except KeyError:
routing_map = self._BuildHttpRoutingMap(router.__class__)
self._routing_maps_cache.Put(router.__class__, routing_map)
return routing_map
def _SetField(self, args, type_info, value):
"""Sets fields on the arg rdfvalue object."""
if hasattr(type_info, "enum"):
try:
coerced_obj = type_info.enum[value.upper()]
except KeyError:
# A bool is an enum but serializes to "1" / "0" which are both not in
# enum or reverse_enum.
coerced_obj = type_info.type.FromSerializedString(value)
else:
coerced_obj = type_info.type.FromSerializedString(value)
args.Set(type_info.name, coerced_obj)
def _GetArgsFromRequest(self, request, method_metadata, route_args):
"""Builds args struct out of HTTP request."""
format_mode = GetRequestFormatMode(request, method_metadata)
if request.method in ["GET", "HEAD"]:
if method_metadata.args_type:
unprocessed_request = request.args
if hasattr(unprocessed_request, "dict"):
unprocessed_request = unprocessed_request.dict()
args = method_metadata.args_type()
for type_info in args.type_infos:
if type_info.name in route_args:
self._SetField(args, type_info, route_args[type_info.name])
elif type_info.name in unprocessed_request:
self._SetField(args, type_info, unprocessed_request[type_info.name])
else:
args = None
elif request.method in ["POST", "DELETE", "PATCH"]:
try:
args = method_metadata.args_type()
for type_info in args.type_infos:
if type_info.name in route_args:
self._SetField(args, type_info, route_args[type_info.name])
if request.content_type.startswith("multipart/form-data;"):
payload = json.loads(request.form["_params_"])
args.FromDict(payload)
for name, fd in request.files.items():
args.Set(name, fd.read())
elif format_mode == JsonMode.PROTO3_JSON_MODE:
# NOTE: Arguments rdfvalue has to be a protobuf-based RDFValue.
args_proto = args.protobuf()
json_format.Parse(request.get_data(as_text=True) or "{}", args_proto)
args.ParseFromString(args_proto.SerializeToString())
else:
payload = json.loads(request.get_data(as_text=True) or "{}")
if payload:
args.FromDict(payload)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error while parsing POST request %s (%s): %s",
request.path, request.method, e)
raise PostRequestParsingError(e)
else:
raise UnsupportedHttpMethod("Unsupported method: %s." % request.method)
return args
def MatchRouter(self, request):
"""Returns a router for a given HTTP request."""
router = api_auth_manager.API_AUTH_MGR.GetRouterForUser(request.user)
routing_map = self._GetRoutingMap(router)
matcher = routing_map.bind("%s:%s" % (request.environ["SERVER_NAME"],
request.environ["SERVER_PORT"]))
try:
match = matcher.match(request.path, request.method)
except werkzeug_exceptions.NotFound:
raise ApiCallRouterNotFoundError("No API router was found for (%s) %s" %
(request.path, request.method))
router_method_metadata, route_args_dict = match
return (router, router_method_metadata, self._GetArgsFromRequest(
request, router_method_metadata, route_args_dict))
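# Illustrative sketch (hypothetical path and endpoint name, not an actual GRR
# route): the routing map built by RouterMatcher exposes each annotated method
# under both the legacy /api/ prefix and the additional /api/v2/ prefix.
def _ExampleDualPrefixRoutingMap():
  routing_map = routing.Map()
  for rule_path in ["/api/clients", "/api/v2/clients"]:
    routing_map.add(
        routing.Rule(rule_path, methods=["GET"], endpoint="ListClients"))
  adapter = routing_map.bind("localhost:8000")
  # Both paths resolve to the same endpoint: ("ListClients", {}).
  return (adapter.match("/api/clients", "GET"),
          adapter.match("/api/v2/clients", "GET"))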
class JSONEncoderWithRDFPrimitivesSupport(json.JSONEncoder):
"""Custom JSON encoder that encodes handlers output.
Custom encoder is required to facilitate usage of primitive values -
booleans, integers and strings - in handlers responses.
If handler references an RDFString, RDFInteger or and RDFBOol when building a
response, it will lead to JSON encoding failure when response encoded,
unless this custom encoder is used. Another way to solve this issue would be
to explicitly call api_value_renderers.RenderValue on every value returned
from the renderer, but it will make the code look overly verbose and dirty.
"""
def default(self, obj):
if isinstance(obj, (rdfvalue.RDFInteger, rdfvalue.RDFBool,
rdfvalue.RDFString)):
return obj.SerializeToDataStore()
return json.JSONEncoder.default(self, obj)
class JsonMode(object):
"""Enum class for various JSON encoding modes."""
PROTO3_JSON_MODE = 0
GRR_JSON_MODE = 1
GRR_ROOT_TYPES_STRIPPED_JSON_MODE = 2
GRR_TYPE_STRIPPED_JSON_MODE = 3
def GetRequestFormatMode(request, method_metadata):
"""Returns JSON format mode corresponding to a given request and method."""
if request.path.startswith("/api/v2/"):
return JsonMode.PROTO3_JSON_MODE
if request.args.get("strip_type_info", ""):
return JsonMode.GRR_TYPE_STRIPPED_JSON_MODE
for http_method, unused_url, options in method_metadata.http_methods:
if (http_method == request.method and
options.get("strip_root_types", False)):
return JsonMode.GRR_ROOT_TYPES_STRIPPED_JSON_MODE
return JsonMode.GRR_JSON_MODE
class HttpRequestHandler(object):
"""Handles HTTP requests."""
@staticmethod
def BuildToken(request, execution_time):
"""Build an ACLToken from the request."""
# The request.args dictionary will also be filled on HEAD calls.
if request.method in ["GET", "HEAD"]:
reason = request.args.get("reason", "")
elif request.method in ["POST", "DELETE", "PATCH"]:
# The header X-GRR-Reason is set in api-service.js.
reason = utils.SmartUnicode(
urllib2.unquote(request.headers.get("X-Grr-Reason", "")))
# We assume that request.user contains the username that we can trust.
# No matter what authentication method is used, the WebAuthManager is
    # responsible for authenticating the user and setting request.user to
# a correct value (see gui/webauth.py).
#
# The token that's built here will be later used to find an API router,
# get the ApiCallHandler from the router, and then to call the handler's
# Handle() method. API router will be responsible for all the ACL checks.
token = access_control.ACLToken(
username=request.user,
reason=reason,
process="GRRAdminUI",
expiry=rdfvalue.RDFDatetime.Now() + execution_time)
for field in ["Remote_Addr", "X-Forwarded-For"]:
remote_addr = request.headers.get(field, "")
if remote_addr:
token.source_ips.append(remote_addr)
return token
def _FormatResultAsJson(self, result, format_mode=None):
if result is None:
return dict(status="OK")
if format_mode == JsonMode.PROTO3_JSON_MODE:
return json.loads(json_format.MessageToJson(result.AsPrimitiveProto()))
elif format_mode == JsonMode.GRR_ROOT_TYPES_STRIPPED_JSON_MODE:
result_dict = {}
for field, value in result.ListSetFields():
if isinstance(field,
(rdf_structs.ProtoDynamicEmbedded,
rdf_structs.ProtoEmbedded, rdf_structs.ProtoList)):
result_dict[field.name] = api_value_renderers.RenderValue(value)
else:
result_dict[field.name] = api_value_renderers.RenderValue(value)[
"value"]
return result_dict
elif format_mode == JsonMode.GRR_TYPE_STRIPPED_JSON_MODE:
rendered_data = api_value_renderers.RenderValue(result)
return api_value_renderers.StripTypeInfo(rendered_data)
elif format_mode == JsonMode.GRR_JSON_MODE:
return api_value_renderers.RenderValue(result)
else:
raise ValueError("Invalid format_mode: %s", format_mode)
@staticmethod
def CallApiHandler(handler, args, token=None):
"""Handles API call to a given handler with given args and token."""
result = handler.Handle(args, token=token)
expected_type = handler.result_type
if expected_type is None:
expected_type = None.__class__
if result.__class__ != expected_type:
raise UnexpectedResultTypeError("Expected %s, but got %s." % (
expected_type.__name__, result.__class__.__name__))
return result
def __init__(self, router_matcher=None):
self._router_matcher = router_matcher or RouterMatcher()
def _BuildResponse(self,
status,
rendered_data,
method_name=None,
headers=None,
content_length=None,
token=None,
no_audit_log=False):
"""Builds HTTPResponse object from rendered data and HTTP status."""
# To avoid IE content sniffing problems, escape the tags. Otherwise somebody
# may send a link with malicious payload that will be opened in IE (which
# does content sniffing and doesn't respect Content-Disposition header) and
    # IE will treat the document as HTML and execute arbitrary JS that was
# passed with the payload.
str_data = json.dumps(
rendered_data, cls=JSONEncoderWithRDFPrimitivesSupport)
# XSSI protection and tags escaping
rendered_data = ")]}'\n" + str_data.replace("<", r"\u003c").replace(
">", r"\u003e")
response = werkzeug_wrappers.Response(
rendered_data,
status=status,
content_type="application/json; charset=utf-8")
response.headers[
"Content-Disposition"] = "attachment; filename=response.json"
response.headers["X-Content-Type-Options"] = "nosniff"
if token and token.reason:
response.headers["X-GRR-Reason"] = utils.SmartStr(token.reason)
if method_name:
response.headers["X-API-Method"] = method_name
if no_audit_log:
response.headers["X-No-Log"] = "True"
for key, value in (headers or {}).items():
response.headers[key] = value
if content_length is not None:
response.content_length = content_length
return response
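  # Illustrative client-side sketch (not used by the handler itself): undo the
  # XSSI-protection prefix added above before parsing the JSON body. The
  # escaped \u003c / \u003e sequences are decoded back to < and > by the JSON
  # parser.
  @staticmethod
  def _ExampleStripXssiPrefix(raw_body):
    if raw_body.startswith(")]}'\n"):
      raw_body = raw_body[len(")]}'\n"):]
    return json.loads(raw_body)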
def _BuildStreamingResponse(self, binary_stream, method_name=None):
"""Builds HTTPResponse object for streaming."""
# We get a first chunk of the output stream. This way the likelihood
# of catching an exception that may happen during response generation
# is much higher.
content = binary_stream.GenerateContent()
try:
peek = content.next()
stream = itertools.chain([peek], content)
except StopIteration:
stream = []
response = werkzeug_wrappers.Response(
response=stream,
content_type="binary/octet-stream",
direct_passthrough=True)
response.headers["Content-Disposition"] = ("attachment; filename=%s" %
binary_stream.filename)
if method_name:
response.headers["X-API-Method"] = method_name
if binary_stream.content_length:
response.content_length = binary_stream.content_length
return response
def HandleRequest(self, request):
"""Handles given HTTP request."""
impersonated_username = config_lib.CONFIG["AdminUI.debug_impersonate_user"]
if impersonated_username:
logging.info("Overriding user as %s", impersonated_username)
request.user = config_lib.CONFIG["AdminUI.debug_impersonate_user"]
if not aff4_users.GRRUser.IsValidUsername(request.user):
return self._BuildResponse(
403, dict(message="Invalid username: %s" % request.user))
try:
router, method_metadata, args = self._router_matcher.MatchRouter(request)
except access_control.UnauthorizedAccess as e:
logging.exception("Access denied to %s (%s): %s", request.path,
request.method, e)
additional_headers = {
"X-GRR-Unauthorized-Access-Reason": utils.SmartStr(e.message),
"X-GRR-Unauthorized-Access-Subject": utils.SmartStr(e.subject)
}
return self._BuildResponse(
403,
dict(
message="Access denied by ACL: %s" % utils.SmartStr(e.message),
subject=utils.SmartStr(e.subject)),
headers=additional_headers)
except ApiCallRouterNotFoundError as e:
return self._BuildResponse(404, dict(message=e.message))
except werkzeug_exceptions.MethodNotAllowed as e:
return self._BuildResponse(405, dict(message=e.message))
except Error as e:
logging.exception("Can't match URL to router/method: %s", e)
return self._BuildResponse(500,
dict(
message=str(e),
traceBack=traceback.format_exc()))
# SetUID() is called here so that ACL checks done by the router do not
# clash with datastore ACL checks.
# TODO(user): increase token expiry time.
token = self.BuildToken(request, 60).SetUID()
handler = None
try:
# ACL checks are done here by the router. If this method succeeds (i.e.
# does not raise), then handlers run without further ACL checks (they're
# free to do some in their own implementations, though).
handler = getattr(router, method_metadata.name)(args, token=token)
if handler.args_type != method_metadata.args_type:
raise RuntimeError("Handler args type doesn't match "
"method args type: %s vs %s" %
(handler.args_type, method_metadata.args_type))
binary_result_type = (
api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE)
if (handler.result_type != method_metadata.result_type and
not (handler.result_type is None and
method_metadata.result_type == binary_result_type)):
raise RuntimeError("Handler result type doesn't match "
"method result type: %s vs %s" %
(handler.result_type, method_metadata.result_type))
# HEAD method is only used for checking the ACLs for particular API
# methods.
if request.method == "HEAD":
# If the request would return a stream, we add the Content-Length
# header to the response.
if (method_metadata.result_type ==
method_metadata.BINARY_STREAM_RESULT_TYPE):
binary_stream = handler.Handle(args, token=token)
return self._BuildResponse(
200, {"status": "OK"},
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
content_length=binary_stream.content_length,
token=token)
else:
return self._BuildResponse(
200, {"status": "OK"},
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
if (method_metadata.result_type ==
method_metadata.BINARY_STREAM_RESULT_TYPE):
binary_stream = handler.Handle(args, token=token)
return self._BuildStreamingResponse(
binary_stream, method_name=method_metadata.name)
else:
format_mode = GetRequestFormatMode(request, method_metadata)
result = self.CallApiHandler(handler, args, token=token)
rendered_data = self._FormatResultAsJson(
result, format_mode=format_mode)
return self._BuildResponse(
200,
rendered_data,
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
except access_control.UnauthorizedAccess as e:
logging.exception("Access denied to %s (%s) with %s: %s", request.path,
request.method, method_metadata.name, e)
additional_headers = {
"X-GRR-Unauthorized-Access-Reason": utils.SmartStr(e.message),
"X-GRR-Unauthorized-Access-Subject": utils.SmartStr(e.subject)
}
return self._BuildResponse(
403,
dict(
message="Access denied by ACL: %s" % e.message,
subject=utils.SmartStr(e.subject)),
headers=additional_headers,
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
except api_call_handler_base.ResourceNotFoundError as e:
return self._BuildResponse(
404,
dict(message=e.message),
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
except NotImplementedError as e:
return self._BuildResponse(
501,
dict(message=e.message),
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error while processing %s (%s) with %s: %s",
request.path, request.method,
handler.__class__.__name__, e)
return self._BuildResponse(
500,
dict(message=str(e), traceBack=traceback.format_exc()),
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
def RenderHttpResponse(request):
"""Renders HTTP response to a given HTTP request."""
start_time = time.time()
response = HTTP_REQUEST_HANDLER.HandleRequest(request)
total_time = time.time() - start_time
method_name = response.headers.get("X-API-Method", "unknown")
if response.status_code == 200:
status = "SUCCESS"
elif response.status_code == 403:
status = "FORBIDDEN"
elif response.status_code == 404:
status = "NOT_FOUND"
elif response.status_code == 501:
status = "NOT_IMPLEMENTED"
else:
status = "SERVER_ERROR"
if request.method == "HEAD":
metric_name = "api_access_probe_latency"
else:
metric_name = "api_method_latency"
stats.STATS.RecordEvent(
metric_name, total_time, fields=(method_name, "http", status))
return response
HTTP_REQUEST_HANDLER = None
class HttpApiInitHook(registry.InitHook):
"""Register HTTP API handlers."""
def RunOnce(self):
global HTTP_REQUEST_HANDLER
HTTP_REQUEST_HANDLER = HttpRequestHandler()
db = symbol_database.Default()
# Register api_pb2.DESCRIPTOR in the database, so that all API-related
# protos are recognized when Any messages are unpacked.
db.RegisterFileDescriptor(api_pb2.DESCRIPTOR)
stats.STATS.RegisterEventMetric(
"api_method_latency",
fields=[("method_name", str), ("protocol", str), ("status", str)])
stats.STATS.RegisterEventMetric(
"api_access_probe_latency",
fields=[("method_name", str), ("protocol", str), ("status", str)])
|
|
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import mock
import warnings
from cassandra.cqlengine import CACHING_ALL, CACHING_NONE
from cassandra.cqlengine.connection import get_session, get_cluster
from cassandra.cqlengine import CQLEngineException
from cassandra.cqlengine import management
from cassandra.cqlengine.management import _get_non_pk_field_names, _get_table_metadata, sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
from tests.integration import CASSANDRA_VERSION, PROTOCOL_VERSION
from tests.integration.cqlengine.base import BaseCassEngTestCase
from tests.integration.cqlengine.query.test_queryset import TestModel
class KeyspaceManagementTest(BaseCassEngTestCase):
    def test_create_drop_succeeds(self):
cluster = get_cluster()
keyspace_ss = 'test_ks_ss'
self.assertNotIn(keyspace_ss, cluster.metadata.keyspaces)
management.create_keyspace_simple(keyspace_ss, 2)
self.assertIn(keyspace_ss, cluster.metadata.keyspaces)
management.drop_keyspace(keyspace_ss)
self.assertNotIn(keyspace_ss, cluster.metadata.keyspaces)
keyspace_nts = 'test_ks_nts'
self.assertNotIn(keyspace_nts, cluster.metadata.keyspaces)
management.create_keyspace_network_topology(keyspace_nts, {'dc1': 1})
self.assertIn(keyspace_nts, cluster.metadata.keyspaces)
management.drop_keyspace(keyspace_nts)
self.assertNotIn(keyspace_nts, cluster.metadata.keyspaces)
class DropTableTest(BaseCassEngTestCase):
def test_multiple_deletes_dont_fail(self):
sync_table(TestModel)
drop_table(TestModel)
drop_table(TestModel)
class LowercaseKeyModel(Model):
first_key = columns.Integer(primary_key=True)
second_key = columns.Integer(primary_key=True)
some_data = columns.Text()
class CapitalizedKeyModel(Model):
firstKey = columns.Integer(primary_key=True)
secondKey = columns.Integer(primary_key=True)
someData = columns.Text()
class PrimaryKeysOnlyModel(Model):
__options__ = {'compaction': {'class': 'LeveledCompactionStrategy'}}
    first_key = columns.Integer(primary_key=True)
second_key = columns.Integer(primary_key=True)
class CapitalizedKeyTest(BaseCassEngTestCase):
def test_table_definition(self):
""" Tests that creating a table with capitalized column names succeeds """
sync_table(LowercaseKeyModel)
sync_table(CapitalizedKeyModel)
drop_table(LowercaseKeyModel)
drop_table(CapitalizedKeyModel)
class FirstModel(Model):
__table_name__ = 'first_model'
first_key = columns.UUID(primary_key=True)
second_key = columns.UUID()
third_key = columns.Text()
class SecondModel(Model):
__table_name__ = 'first_model'
first_key = columns.UUID(primary_key=True)
second_key = columns.UUID()
third_key = columns.Text()
fourth_key = columns.Text()
class ThirdModel(Model):
__table_name__ = 'first_model'
first_key = columns.UUID(primary_key=True)
second_key = columns.UUID()
third_key = columns.Text()
# removed fourth key, but it should stay in the DB
blah = columns.Map(columns.Text, columns.Text)
class FourthModel(Model):
__table_name__ = 'first_model'
first_key = columns.UUID(primary_key=True)
second_key = columns.UUID()
third_key = columns.Text()
# removed fourth key, but it should stay in the DB
renamed = columns.Map(columns.Text, columns.Text, db_field='blah')
class AddColumnTest(BaseCassEngTestCase):
def setUp(self):
drop_table(FirstModel)
def test_add_column(self):
sync_table(FirstModel)
fields = _get_non_pk_field_names(_get_table_metadata(FirstModel))
        # this should contain the second and third keys (the non-PK fields)
self.assertEqual(len(fields), 2)
        # syncing SecondModel adds the fourth_key column to the same table
sync_table(SecondModel)
fields = _get_non_pk_field_names(_get_table_metadata(FirstModel))
self.assertEqual(len(fields), 3)
sync_table(ThirdModel)
fields = _get_non_pk_field_names(_get_table_metadata(FirstModel))
self.assertEqual(len(fields), 4)
sync_table(FourthModel)
fields = _get_non_pk_field_names(_get_table_metadata(FirstModel))
self.assertEqual(len(fields), 4)
class ModelWithTableProperties(Model):
__options__ = {'bloom_filter_fp_chance': '0.76328',
'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR',
'gc_grace_seconds': '2063',
'read_repair_chance': '0.17985',
'dclocal_read_repair_chance': '0.50811'}
key = columns.UUID(primary_key=True)
class TablePropertiesTests(BaseCassEngTestCase):
def setUp(self):
drop_table(ModelWithTableProperties)
def test_set_table_properties(self):
sync_table(ModelWithTableProperties)
expected = {'bloom_filter_fp_chance': 0.76328,
'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR',
'gc_grace_seconds': 2063,
'read_repair_chance': 0.17985,
# For some reason 'dclocal_read_repair_chance' in CQL is called
# just 'local_read_repair_chance' in the schema table.
# Source: https://issues.apache.org/jira/browse/CASSANDRA-6717
# TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up
# 'local_read_repair_chance': 0.50811,
}
options = management._get_table_metadata(ModelWithTableProperties).options
self.assertEqual(dict([(k, options.get(k)) for k in expected.keys()]),
expected)
def test_table_property_update(self):
ModelWithTableProperties.__options__['bloom_filter_fp_chance'] = 0.66778
ModelWithTableProperties.__options__['comment'] = 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy'
ModelWithTableProperties.__options__['gc_grace_seconds'] = 96362
ModelWithTableProperties.__options__['read_repair_chance'] = 0.2989
ModelWithTableProperties.__options__['dclocal_read_repair_chance'] = 0.12732
sync_table(ModelWithTableProperties)
table_options = management._get_table_metadata(ModelWithTableProperties).options
self.assertDictContainsSubset(ModelWithTableProperties.__options__, table_options)
def test_bogus_option_update(self):
sync_table(ModelWithTableProperties)
option = 'no way will this ever be an option'
try:
ModelWithTableProperties.__options__[option] = 'what was I thinking?'
self.assertRaisesRegexp(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties)
finally:
ModelWithTableProperties.__options__.pop(option, None)
class SyncTableTests(BaseCassEngTestCase):
def setUp(self):
drop_table(PrimaryKeysOnlyModel)
def test_sync_table_works_with_primary_keys_only_tables(self):
sync_table(PrimaryKeysOnlyModel)
# blows up with DoesNotExist if table does not exist
table_meta = management._get_table_metadata(PrimaryKeysOnlyModel)
self.assertIn('LeveledCompactionStrategy', table_meta.as_cql_query())
PrimaryKeysOnlyModel.__options__['compaction']['class'] = 'SizeTieredCompactionStrategy'
sync_table(PrimaryKeysOnlyModel)
table_meta = management._get_table_metadata(PrimaryKeysOnlyModel)
self.assertIn('SizeTieredCompactionStrategy', table_meta.as_cql_query())
class NonModelFailureTest(BaseCassEngTestCase):
class FakeModel(object):
pass
def test_failure(self):
with self.assertRaises(CQLEngineException):
sync_table(self.FakeModel)
class StaticColumnTests(BaseCassEngTestCase):
def test_static_columns(self):
if PROTOCOL_VERSION < 2:
raise unittest.SkipTest("Native protocol 2+ required, currently using: {0}".format(PROTOCOL_VERSION))
class StaticModel(Model):
id = columns.Integer(primary_key=True)
c = columns.Integer(primary_key=True)
name = columns.Text(static=True)
drop_table(StaticModel)
session = get_session()
with mock.patch.object(session, "execute", wraps=session.execute) as m:
sync_table(StaticModel)
self.assertGreater(m.call_count, 0)
statement = m.call_args[0][0].query_string
self.assertIn('"name" text static', statement)
# if we sync again, we should not apply an alter w/ a static
sync_table(StaticModel)
with mock.patch.object(session, "execute", wraps=session.execute) as m2:
sync_table(StaticModel)
self.assertEqual(len(m2.call_args_list), 0)
|
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
import sys
from pyface.action.action import Action
from pyface.confirmation_dialog import confirm
from pyface.constant import YES
from pyface.message_dialog import information
from pyface.tasks.action.task_action import TaskAction
from pyface.tasks.task_window_layout import TaskWindowLayout
from traits.api import Any, List
from pychron.envisage.resources import icon
from pychron.envisage.ui_actions import UIAction, UITaskAction
# ===============================================================================
# help
# ===============================================================================
# from pychron.envisage.user_login import login_file
def restart():
os.execl(sys.executable, *([sys.executable] + sys.argv))
def get_key_binding(k_id):
from pychron.envisage.key_bindings import user_key_map
try:
return user_key_map[k_id][0]
except KeyError:
pass
class myTaskAction(TaskAction):
task_ids = List
def _task_changed(self):
if self.task:
if self.task.id in self.task_ids:
enabled = True
if self.enabled_name:
if self.object:
enabled = bool(
self._get_attr(self.object, self.enabled_name, False)
)
if enabled:
self._enabled = True
else:
self._enabled = False
def _enabled_update(self):
"""
reimplement ListeningAction's _enabled_update
"""
if self.enabled_name:
if self.object:
self.enabled = bool(
self._get_attr(self.object, self.enabled_name, False)
)
else:
self.enabled = False
elif self._enabled is not None:
self.enabled = self._enabled
else:
self.enabled = bool(self.object)
class PAction(UIAction):
def __init__(self, *args, **kw):
super(PAction, self).__init__(*args, **kw)
acc = get_key_binding(self.id)
self.accelerator = acc or self.accelerator
class PTaskAction(UITaskAction):
def __init__(self, *args, **kw):
super(PTaskAction, self).__init__(*args, **kw)
acc = get_key_binding(self.id)
self.accelerator = acc or self.accelerator
class DemoAction(Action):
name = "Demo"
accelerator = "Shift+Ctrl+0"
def perform(self, event):
app = event.task.application
app.info("Demo message: {}".format("Hello version 2.0"))
class StartupTestsAction(Action):
name = "Run Startup Tests"
def perform(self, event):
app = event.task.application
app.do_startup_tests(
force_show_results=True, cancel_auto_close=True, can_cancel=False
)
class KeyBindingsAction(PAction):
name = "Edit Key Bindings"
def perform(self, event):
from pychron.envisage.key_bindings import edit_key_bindings
edit_key_bindings()
class UserAction(PAction):
def _get_current_user(self, event):
app = event.task.application
args = app.id.split(".")
cuser = args[-1]
base_id = ".".join(args[:-1])
return base_id, cuser
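    # Illustrative sketch (hypothetical application id): an app.id of
    # 'pychron.experiment.jross' yields base_id='pychron.experiment' and
    # cuser='jross'.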
class SwitchUserAction(UserAction):
name = "Switch User"
image = icon("user_suit")
def perform(self, event):
pass
# from pychron.envisage.user_login import get_user
#
# base_id, cuser = self._get_current_user(event)
# user = get_user(current=cuser)
# if user:
# # from pychron.paths import paths
# # set login file
# with open(login_file, 'w') as wfile:
# wfile.write(user)
# restart()
class CopyPreferencesAction(UserAction):
name = "Copy Preferences"
def perform(self, event):
pass
# from pychron.envisage.user_login import get_src_dest_user
#
# base_id, cuser = self._get_current_user(event)
# src_name, dest_names = get_src_dest_user(cuser)
#
# if src_name:
#
# for di in dest_names:
# dest_id = '{}.{}'.format(base_id, di)
# src_id = '{}.{}'.format(base_id, src_name)
#
# root = os.path.join(os.path.expanduser('~'), '.enthought')
#
# src_dir = os.path.join(root, src_id)
# dest_dir = os.path.join(root, dest_id)
# if not os.path.isdir(dest_dir):
# os.mkdir(dest_dir)
#
# name = 'preferences.ini'
# dest = os.path.join(dest_dir, name)
# src = os.path.join(src_dir, name)
# shutil.copyfile(src, dest)
class RestartAction(PAction):
name = "Restart"
image = icon("system-restart")
def perform(self, event):
restart()
class WebAction(PAction):
def _open_url(self, url):
import webbrowser
import requests
try:
requests.get(url)
except BaseException as e:
print("web action url:{} exception:{}".format(url, e))
return
webbrowser.open_new(url)
return True
class IssueAction(WebAction):
name = "Add Request/Report Bug"
image = icon("bug")
def perform(self, event):
"""
        go to the issues page to add a feature request or report a bug
"""
app = event.task.window.application
name = app.preferences.get("pychron.general.organization")
if not name:
information(
event.task.window.control,
'Please set an "Organziation" in General Preferences',
)
return
url = "https://github.com/{}/pychron/issues/new".format(name)
self._open_url(url)
class SettingsAction(Action):
def perform(self, event):
app = event.task.window.application
name = app.preferences.get("pychron.general.remote")
if not name:
information(
event.task.window.control,
'Please set an "Laboratory Repo" in General Preferences',
)
return
from pychron.envisage.settings_repo import SettingsRepoManager
from pychron.paths import paths
root = os.path.join(paths.root_dir, ".lab")
exists = os.path.isdir(os.path.join(root, ".git"))
if exists:
repo = SettingsRepoManager()
repo.path = root
repo.open_repo(root)
repo.pull()
else:
url = "https://github.com/{}".format(name)
repo = SettingsRepoManager.clone_from(url, root)
self._perform(repo)
def _perform(self, repo):
raise NotImplementedError
class ApplySettingsAction(SettingsAction):
name = "Apply Settings..."
def _perform(self, repo):
"""
select and apply settings from the laboratory's repository
:param repo:
:return:
"""
repo.apply_settings()
class ShareSettingsAction(SettingsAction):
name = "Share Settings..."
def _perform(self, repo):
"""
save current settings to the laboratory's repository
:param repo:
:return:
"""
repo.share_settings()
class NoteAction(WebAction):
name = "Add Laboratory Note"
image = icon("insert-comment")
def perform(self, event):
"""
        go to the laboratory repository's issues page to add a note
"""
app = event.task.window.application
name = app.preferences.get("pychron.general.remote")
if not name:
information(
event.task.window.control,
'Please set an "Laboratory Repo" in General Preferences',
)
return
url = "https://github.com/{}/issues/new".format(name)
self._open_url(url)
class DocumentationAction(WebAction):
name = "View Documentation"
image = icon("documentation")
def perform(self, event):
"""
        open the online documentation in a browser
"""
url = "http://pychron.readthedocs.org/en/latest/index.html"
self._open_url(url)
class WaffleAction(WebAction):
name = "View Waffle Board"
image = icon("waffle")
def perform(self, event):
"""
        go to the project's waffle board
"""
url = "https://waffle.io/NMGRL/pychron"
self._open_url(url)
class ChangeLogAction(WebAction):
name = "What's New"
image = icon("documentation")
description = "View changelog"
def perform(self, event):
"""
        open the changelog for the current release
"""
from pychron.version import __version__
app = event.task.window.application
org = app.preferences.get("pychron.general.organization")
url = "https://github.com/{}/pychron/blob/release/v{}/CHANGELOG.md".format(
org, __version__
)
if not self._open_url(url):
url = "https://github.com/{}/pychron/blob/develop/CHANGELOG.md".format(org)
self._open_url(url)
class AboutAction(PAction):
name = "About Pychron"
def perform(self, event):
app = event.task.window.application
app.about()
class ResetLayoutAction(PTaskAction):
name = "Reset Layout"
image = icon("view-restore")
def perform(self, event):
self.task.window.reset_layout()
class PositionAction(PAction):
name = "Window Positions"
image = icon("window-new")
def perform(self, event):
from pychron.envisage.tasks.layout_manager import LayoutManager
app = event.task.window.application
lm = LayoutManager(app)
lm.edit_traits()
class MinimizeAction(PTaskAction):
name = "Minimize"
accelerator = "Ctrl+m"
def perform(self, event):
app = self.task.window.application
app.active_window.control.showMinimized()
class CloseAction(PTaskAction):
name = "Close"
accelerator = "Ctrl+W"
def perform(self, event):
ok = YES
if len(self.task.window.application.windows) == 1:
ok = confirm(self.task.window.control, message="Quit Pychron?")
if ok == YES:
self.task.window.close()
class CloseOthersAction(PTaskAction):
name = "Close others"
accelerator = "Ctrl+Shift+W"
def perform(self, event):
win = self.task.window
for wi in self.task.window.application.windows:
if wi != win:
wi.close()
class OpenAdditionalWindow(PTaskAction):
name = "Open Additional Window"
description = "Open an additional window of the current active task"
def perform(self, event):
app = self.task.window.application
win = app.create_window(TaskWindowLayout(self.task.id))
win.open()
class RaiseAction(PTaskAction):
window = Any
style = "toggle"
def perform(self, event):
self.window.activate()
self.checked = True
# @on_trait_change('window:deactivated')
# def _on_deactivate(self):
# self.checked = False
class RaiseUIAction(PTaskAction):
style = "toggle"
def perform(self, event):
self.checked = True
class GenericSaveAction(PTaskAction):
name = "Save"
accelerator = "Ctrl+S"
image = icon("document-save")
def perform(self, event):
task = self.task
if hasattr(task, "save"):
task.save()
class GenericSaveAsAction(PTaskAction):
name = "Save As..."
accelerator = "Ctrl+Shift+S"
image = icon("document-save-as")
def perform(self, event):
task = self.task
if hasattr(task, "save_as"):
task.save_as()
class GenericFindAction(PTaskAction):
accelerator = "Ctrl+F"
name = "Find text..."
def perform(self, event):
task = self.task
if hasattr(task, "find"):
task.find()
class FileOpenAction(PAction):
task_id = ""
test_path = ""
image = icon("document-open")
def perform(self, event):
if event.task.id == self.task_id:
task = event.task
task.open()
else:
application = event.task.window.application
win = application.create_window(TaskWindowLayout(self.task_id))
task = win.active_task
if task.open(path=self.test_path):
win.open()
class NewAction(PAction):
task_id = ""
def perform(self, event):
if event.task.id == self.task_id:
task = event.task
task.new()
else:
application = event.task.window.application
win = application.create_window(TaskWindowLayout(self.task_id))
task = win.active_task
if task.new():
win.open()
# class GenericReplaceAction(TaskAction):
# pass
# else:
# manager = self._get_experimentor(event)
# manager.save_as_experiment_queues()
class ToggleFullWindowAction(TaskAction):
name = "Toggle Full Window"
method = "toggle_full_window"
image = icon("view-fullscreen-8")
class EditInitializationAction(Action):
name = "Edit Initialization"
image = icon("brick-edit")
def perform(self, event):
from pychron.envisage.initialization.initialization_edit_view import (
edit_initialization,
)
if edit_initialization():
restart()
class EditTaskExtensionsAction(Action):
name = "Edit UI..."
def perform(self, event):
app = event.task.window.application
from pychron.envisage.task_extensions import edit_task_extensions
if edit_task_extensions(app.available_task_extensions):
restart()
# ============= EOF =============================================
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for schemas."""
from __future__ import absolute_import
import itertools
import sys
import unittest
from typing import ByteString
from typing import List
from typing import Mapping
from typing import NamedTuple
from typing import Optional
from typing import Sequence
import numpy as np
from past.builtins import unicode
from apache_beam.portability.api import schema_pb2
from apache_beam.typehints.schemas import typing_from_runner_api
from apache_beam.typehints.schemas import typing_to_runner_api
IS_PYTHON_3 = sys.version_info.major > 2
class SchemaTest(unittest.TestCase):
""" Tests for Runner API Schema proto to/from typing conversions
There are two main tests: test_typing_survives_proto_roundtrip, and
test_proto_survives_typing_roundtrip. These are both necessary because Schemas
are cached by ID, so performing just one of them wouldn't necessarily exercise
all code paths.
"""
def test_typing_survives_proto_roundtrip(self):
all_nonoptional_primitives = [
np.int8,
np.int16,
np.int32,
np.int64,
np.float32,
np.float64,
unicode,
bool,
]
# The bytes type cannot survive a roundtrip to/from proto in Python 2.
# In order to use BYTES a user type has to use typing.ByteString (because
# bytes == str, and we map str to STRING).
if IS_PYTHON_3:
all_nonoptional_primitives.extend([bytes])
all_optional_primitives = [
Optional[typ] for typ in all_nonoptional_primitives
]
all_primitives = all_nonoptional_primitives + all_optional_primitives
basic_array_types = [Sequence[typ] for typ in all_primitives]
basic_map_types = [
Mapping[key_type,
value_type] for key_type, value_type in itertools.product(
all_primitives, all_primitives)
]
selected_schemas = [
NamedTuple(
'AllPrimitives',
[('field%d' % i, typ) for i, typ in enumerate(all_primitives)]),
NamedTuple('ComplexSchema', [
('id', np.int64),
('name', unicode),
('optional_map', Optional[Mapping[unicode,
Optional[np.float64]]]),
('optional_array', Optional[Sequence[np.float32]]),
('array_optional', Sequence[Optional[bool]]),
])
]
test_cases = all_primitives + \
basic_array_types + \
basic_map_types + \
selected_schemas
for test_case in test_cases:
self.assertEqual(test_case,
typing_from_runner_api(typing_to_runner_api(test_case)))
def test_proto_survives_typing_roundtrip(self):
all_nonoptional_primitives = [
schema_pb2.FieldType(atomic_type=typ)
for typ in schema_pb2.AtomicType.values()
if typ is not schema_pb2.UNSPECIFIED
]
# The bytes type cannot survive a roundtrip to/from proto in Python 2.
# In order to use BYTES a user type has to use typing.ByteString (because
# bytes == str, and we map str to STRING).
if not IS_PYTHON_3:
all_nonoptional_primitives.remove(
schema_pb2.FieldType(atomic_type=schema_pb2.BYTES))
all_optional_primitives = [
schema_pb2.FieldType(nullable=True, atomic_type=typ)
for typ in schema_pb2.AtomicType.values()
if typ is not schema_pb2.UNSPECIFIED
]
all_primitives = all_nonoptional_primitives + all_optional_primitives
basic_array_types = [
schema_pb2.FieldType(array_type=schema_pb2.ArrayType(element_type=typ))
for typ in all_primitives
]
basic_map_types = [
schema_pb2.FieldType(
map_type=schema_pb2.MapType(
key_type=key_type, value_type=value_type)) for key_type,
value_type in itertools.product(all_primitives, all_primitives)
]
selected_schemas = [
schema_pb2.FieldType(
row_type=schema_pb2.RowType(
schema=schema_pb2.Schema(
id='32497414-85e8-46b7-9c90-9a9cc62fe390',
fields=[
schema_pb2.Field(name='field%d' % i, type=typ)
for i, typ in enumerate(all_primitives)
]))),
schema_pb2.FieldType(
row_type=schema_pb2.RowType(
schema=schema_pb2.Schema(
id='dead1637-3204-4bcb-acf8-99675f338600',
fields=[
schema_pb2.Field(
name='id',
type=schema_pb2.FieldType(
atomic_type=schema_pb2.INT64)),
schema_pb2.Field(
name='name',
type=schema_pb2.FieldType(
atomic_type=schema_pb2.STRING)),
schema_pb2.Field(
name='optional_map',
type=schema_pb2.FieldType(
nullable=True,
map_type=schema_pb2.MapType(
key_type=schema_pb2.FieldType(
atomic_type=schema_pb2.STRING
),
value_type=schema_pb2.FieldType(
atomic_type=schema_pb2.DOUBLE
)))),
schema_pb2.Field(
name='optional_array',
type=schema_pb2.FieldType(
nullable=True,
array_type=schema_pb2.ArrayType(
element_type=schema_pb2.FieldType(
atomic_type=schema_pb2.FLOAT)
))),
schema_pb2.Field(
name='array_optional',
type=schema_pb2.FieldType(
array_type=schema_pb2.ArrayType(
element_type=schema_pb2.FieldType(
nullable=True,
atomic_type=schema_pb2.BYTES)
))),
]))),
]
test_cases = all_primitives + \
basic_array_types + \
basic_map_types + \
selected_schemas
for test_case in test_cases:
self.assertEqual(test_case,
typing_to_runner_api(typing_from_runner_api(test_case)))
def test_unknown_primitive_raise_valueerror(self):
self.assertRaises(ValueError, lambda: typing_to_runner_api(np.uint32))
def test_unknown_atomic_raise_valueerror(self):
self.assertRaises(
ValueError, lambda: typing_from_runner_api(
schema_pb2.FieldType(atomic_type=schema_pb2.UNSPECIFIED))
)
@unittest.skipIf(IS_PYTHON_3, 'str is acceptable in python 3')
def test_str_raises_error_py2(self):
    self.assertRaises(ValueError, lambda: typing_to_runner_api(str))
    self.assertRaises(
        ValueError, lambda: typing_to_runner_api(
            NamedTuple('Test', [('int', int), ('str', str)])))
def test_int_maps_to_int64(self):
self.assertEqual(
schema_pb2.FieldType(atomic_type=schema_pb2.INT64),
typing_to_runner_api(int))
def test_float_maps_to_float64(self):
self.assertEqual(
schema_pb2.FieldType(atomic_type=schema_pb2.DOUBLE),
typing_to_runner_api(float))
def test_trivial_example(self):
MyCuteClass = NamedTuple('MyCuteClass', [
('name', unicode),
('age', Optional[int]),
('interests', List[unicode]),
('height', float),
('blob', ByteString),
])
expected = schema_pb2.FieldType(
row_type=schema_pb2.RowType(
schema=schema_pb2.Schema(fields=[
schema_pb2.Field(
name='name',
type=schema_pb2.FieldType(
atomic_type=schema_pb2.STRING),
),
schema_pb2.Field(
name='age',
type=schema_pb2.FieldType(
nullable=True,
atomic_type=schema_pb2.INT64)),
schema_pb2.Field(
name='interests',
type=schema_pb2.FieldType(
array_type=schema_pb2.ArrayType(
element_type=schema_pb2.FieldType(
atomic_type=schema_pb2.STRING)))),
schema_pb2.Field(
name='height',
type=schema_pb2.FieldType(
atomic_type=schema_pb2.DOUBLE)),
schema_pb2.Field(
name='blob',
type=schema_pb2.FieldType(
atomic_type=schema_pb2.BYTES)),
])))
# Only test that the fields are equal. If we attempt to test the entire type
# or the entire schema, the generated id will break equality.
self.assertEqual(expected.row_type.schema.fields,
typing_to_runner_api(MyCuteClass).row_type.schema.fields)
if __name__ == '__main__':
unittest.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-06-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-06-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a local network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local
network gateway operation.
:type parameters:
~azure.mgmt.network.v2017_06_01.models.LocalNetworkGateway
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
LocalNetworkGateway or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_06_01.models.LocalNetworkGateway]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
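    # Illustrative usage sketch (hypothetical resource names): the returned
    # poller can be blocked on to obtain the final LocalNetworkGateway.
    #
    #     poller = network_client.local_network_gateways.create_or_update(
    #         'example-rg', 'example-lgw', parameters)
    #     gateway = poller.result()  # waits for the long-running operation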
def get(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LocalNetworkGateway or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_06_01.models.LocalNetworkGateway or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _delete_initial(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LocalNetworkGateway
:rtype:
~azure.mgmt.network.v2017_06_01.models.LocalNetworkGatewayPaged[~azure.mgmt.network.v2017_06_01.models.LocalNetworkGateway]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
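# A minimal usage sketch: the operations above are normally driven through
# NetworkManagementClient. The resource group, gateway name, address prefix,
# credentials and region below are illustrative assumptions, not values taken
# from the generated client.
def _example_local_network_gateway_usage():
    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.network import NetworkManagementClient
    from azure.mgmt.network.v2017_06_01.models import (AddressSpace,
                                                       LocalNetworkGateway)
    credentials = ServicePrincipalCredentials(client_id='CLIENT_ID',
                                              secret='SECRET',
                                              tenant='TENANT_ID')
    client = NetworkManagementClient(credentials, 'SUBSCRIPTION_ID')
    # create_or_update returns an AzureOperationPoller; result() blocks until
    # the gateway is provisioned.
    poller = client.local_network_gateways.create_or_update(
        'example-rg', 'example-lgw',
        LocalNetworkGateway(
            location='westus',
            gateway_ip_address='40.112.0.1',
            local_network_address_space=AddressSpace(
                address_prefixes=['10.1.0.0/16'])))
    gateway = poller.result()
    # get, list and delete follow the same request pattern as above.
    gateway = client.local_network_gateways.get('example-rg', 'example-lgw')
    for gw in client.local_network_gateways.list('example-rg'):
        print(gw.name)
    client.local_network_gateways.delete('example-rg', 'example-lgw').wait()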
|
|
# main.py
# main loop to be run on a beaglebone for use with other smart relay components
# Author: Brandon Mayfield
import gspread # allows access to Google spreadsheets
import Adafruit_BBIO.ADC as ADC # ADC control
import Adafruit_BBIO.GPIO as GPIO # GPIO control
from time import sleep # pausing
import time
import datetime # timestamp
import thread # multithreading
import threading
import ConfigParser # read config file
import logging # for basic local logging
import logging.handlers # to remove file once size is exceeded
from os.path import isfile # checking for existing files
import Adafruit_CharLCD as LCD # lcd driver
import Adafruit_DHT # temp and humidity sensor driver
import Adafruit_BBIO.PWM as PWM # PWM
import smtplib # for sending mail
import urllib2 # to post data to gae
from itertools import cycle # for button toggle
print('Starting Up')
# # responsible for heartbeat
def runner():
print('Running')
global command_list
global onoff
global button_status
global latest_values
num = 0
GPIO.setup(pin_registry['led1'], GPIO.OUT)
sleep(5)
while True:
#flash heartbeat light
blink = .1
GPIO.output(pin_registry['led1'], GPIO.HIGH)
sleep(blink)
GPIO.output(pin_registry['led1'], GPIO.LOW)
sleep(blink)
GPIO.output(pin_registry['led1'], GPIO.HIGH)
sleep(blink)
GPIO.output(pin_registry['led1'], GPIO.LOW)
sleep(1)
thread.start_new_thread(runner, ())
# wait for correct time
while datetime.date.today() < datetime.date(2015, 4, 9):
print('Waiting for Time')
sleep(5)
# global variables
directory = '/var/lib/cloud9/workspace/smartrelay/'
command_list = [1, 0, 1, 1] # 4 sources: button, logs, remote, current
latest_values = {
'voltage' : -1,
'current' : -1,
'temperature' : -1,
'battery_voltage' : -1,
'humidity' : -1,
'frequency' : -1
} # most recent values of sensor reading
cutoff_dict = {
'battery_voltage' : [0,500],
'current' : [0,500],
'frequency' : [0,500],
'humidity' : [0,500],
'temperature' : [0,500],
'voltage' : [0,500],
}
onoff = 'Off' # relay on or off
button_status = 0 # 1 = command toggle, 2 = reset all
pin_registry = {
'relay_output' : 'P9_14',
'relay_secondary' : 'P9_12',
'led1' : 'P9_16',
'led2' : 'P8_19',
'frequency_input' : 'P9_42',
'button_input' : 'P9_24',
'button_secondary' : 'P9_26',
'lcd_rs' : 'P8_17',
'lcd_en' : 'P8_15',
'lcd_d4' : 'P8_13',
'lcd_d5' : 'P8_11',
'lcd_d6' : 'P8_9',
'lcd_d7' : 'P8_7',
'lcd_backlight' : 'P8_7',
'temp_input' : 'P9_22',
'voltage_ain' : 'P9_36',
'battery_ain' : 'P9_40',
'current_ain' : 'P9_38'
}
frequency_watchdog = 0
# global setup
button_toggle = cycle(range(2))
ADC.setup()
class frequency_update(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
global latest_values
global pin_registry
global frequency_watchdog
self.frequency_time_to_measure = .25
def run(self):
global frequency_watchdog
# frequency measure
count = 0
end = time.time() + self.frequency_time_to_measure
try:
while end > time.time():
GPIO.wait_for_edge(pin_registry['frequency_input'], GPIO.RISING)
count += 1
except RuntimeError: # if the previous event failed
return
value = count/float(self.frequency_time_to_measure) - 4
if abs(value - 60) < 4:
latest_values['frequency'] = round(value,1)
frequency_watchdog += 1
# # the logger just takes the values and updates the global variables
def value_update():
print('Value Update Thread')
global latest_values
global pin_registry
# I/O init
GPIO.setup(pin_registry['frequency_input'], GPIO.IN)
# timer
time_to_measure = 4 # in seconds
counter = 0
print('Value Update Initialized')
while True:
# frequency
current_num = frequency_watchdog
thread = frequency_update()
thread.start()
thread.join(1)
if current_num == frequency_watchdog:
latest_values['frequency'] = 0
# voltage measure
voltage_stack = []
end = time.time() + time_to_measure
while end > time.time():
voltage_stack.append(ADC.read(pin_registry['voltage_ain']))
# print voltage_stack
value = max(voltage_stack)
value = value * 349.514902442
if value < 1:
value = 0
latest_values['voltage'] = round(value,1)
# amps measure
current_stack = []
end = time.time() + time_to_measure
while end > time.time():
current_stack.append(ADC.read(pin_registry['current_ain']))
value = max(current_stack)
value *= 1.8
if value < .03:
value = 0
value *= 10
latest_values['current'] = round(value,1)
counter += 1
# print '{},{}'.format(latest_values['voltage'], counter)
sleep(5)
# # always checking to see if the device needs to shutoff
def commander():
print('Commander Thread')
global command_list
global onoff
global button_status
global latest_values
global pin_registry
global cutoff_dict
# init
GPIO.setup(pin_registry['relay_output'], GPIO.OUT)
sleep(9) # delay to allow other commands to init
print('Commander Thread Initialized')
while True:
# basic shutoff check
if (command_list.count(0) > 0):
GPIO.output(pin_registry['relay_output'], GPIO.LOW)
GPIO.output(pin_registry['relay_secondary'], GPIO.LOW)
GPIO.output(pin_registry['led2'], GPIO.LOW)
onoff = 'Off'
else:
GPIO.output(pin_registry['relay_output'], GPIO.HIGH)
GPIO.output(pin_registry['relay_secondary'], GPIO.HIGH)
GPIO.output(pin_registry['led2'], GPIO.HIGH)
onoff = 'On'
# check if values are out of range
# if out of thresh(from config) turn off until return
# if out of thresh for current kill until further notice
trip_count = 0
current_trip = False
for item in cutoff_dict:
thresh_in = cutoff_dict[item]
item_v = latest_values[item]
if item_v > int(thresh_in[1]) or item_v < int(thresh_in[0]):
trip_count += 1
if item == 'current':
current_trip = True
if trip_count > 0:
command_list[1] = 0
else:
command_list[1] = 1
if current_trip:
command_list[3] = 0
# check if button has something to say
# basic on/off 1
# hard reset 2 (cloud also needs to be able to)
if button_status == 1:
command_list[0] = button_toggle.next()
button_status = 0
elif button_status == 2:
command_list[2] = 1 # reset remote
command_list[3] = 1 # reset current
button_status = 0
sleep(1)
# # button thread to handle button commands
def button_interrupt():
print('Button Thread')
global button_status
global pin_registry
count = 0
# init
GPIO.setup(pin_registry['button_input'], GPIO.IN)
while True:
GPIO.wait_for_edge(pin_registry['button_input'], GPIO.RISING)
# waiting for hit
while(GPIO.input(pin_registry['button_input'])):
count += 1
# debounce, determine hit or hold
if count > 50000:
button_status = 2
elif count > 1000:
button_status = 1
count = 0
sleep(1)
# # this thread handles dumb basic logging, also updates things that won't be changing very quickly
def logger():
print('Logging Thread')
global pin_registry
global latest_values
global onoff
global command_list
# log init
LOG_FILENAME = directory + 'data.log'
# Set up a specific logger with our desired output level
my_logger = logging.getLogger('MyLogger')
my_logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=250000000, backupCount=3)
my_logger.addHandler(handler)
# lcd init
lcd_columns = 16
lcd_rows = 2
slide = 2
global lcd
lcd = LCD.Adafruit_CharLCD(
pin_registry['lcd_rs'],
pin_registry['lcd_en'],
pin_registry['lcd_d4'],
pin_registry['lcd_d5'],
pin_registry['lcd_d6'],
pin_registry['lcd_d7'],
lcd_columns,
lcd_rows,
pin_registry['lcd_backlight']
)
lcd.message('Booting Up...')
# I/O init
GPIO.setup(pin_registry['led2'], GPIO.OUT)
GPIO.setup(pin_registry['button_secondary'], GPIO.IN)
GPIO.setup(pin_registry['relay_output'], GPIO.OUT)
GPIO.setup(pin_registry['relay_secondary'], GPIO.OUT)
sleep(10)
print('Logging Thread Initialized')
r1 = 0
while True:
# get battery voltage
value = ADC.read(pin_registry['battery_ain'])
value = value * 1.8 * 10
value = round(value,2)
latest_values['battery_voltage'] = round(value,1)
        # print value
# get temp
humidity, temp = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, pin_registry['temp_input'])
        if humidity is None:
            humidity = -1
        if temp is None:
            temp = -1
else:
temp = 9.0/5.0 * temp + 32
latest_values['temperature'] = round(temp,1)
latest_values['humidity'] = round(humidity,1)
# create logs
newLog = str(datetime.datetime.today())
for variable, value in latest_values.iteritems():
newLog += ', ' + str(value)
my_logger.info(newLog)
sleep(2)
# update lcd
lcd.clear()
# lcd.message('{},{}'.format(latest_values['current'],r1))
if onoff == 'Off':
lcd.message('Btn:{} Amp:{}\nInt:{} Thresh:{}'.format(command_list[0],command_list[3],command_list[2],command_list[1]))
elif slide == 1:
lcd.message('Temp:' + '{0:0.1f}*F'.format(temp) + '\nBat Volt: ' + str(latest_values['battery_voltage']))
slide += 1
elif slide == 2:
lcd.message('Voltage:' + str(latest_values['voltage']) + '\nCurrent: ' + str(latest_values['current']))
slide = 1
sleep(13)
# # this thread handles cloud logging and pulling commands
def cloud_logger():
print 'Cloud Thread'
# Dev Cloud Thread
global latest_values
global command_list
global onoff
global cutoff_dict
request_stack = []
sleep(30)
print 'Cloud Thread Initialized'
while True:
#build log
if onoff == 'On':
state = True
else:
state = False
        urlstring = 'https://smart-relay.appspot.com/post?timestamp={}&temperature={}&humidity={}&voltage={}&current={}&battery_voltage={}&frequency={}&state={}&password={}'
request = urlstring.format(
str(time.time()),
latest_values['temperature'],
latest_values['humidity'],
latest_values['voltage'],
latest_values['current'],
latest_values['battery_voltage'],
latest_values['frequency'],
state,
'my_password'
)
if len(request_stack) < 100:
request_stack.append(request)
response = ''
for r in list(request_stack):
try:
response = urllib2.urlopen(r).read()
request_stack.remove(r)
time.sleep(5)
except urllib2.URLError:
print 'Cloud Thread Failed'
break
else:
response_list = response.split(',')
if 'rue' in response_list[0]:
command_list[2] = 1
else:
command_list[2] = 0
int_list = []
for r in response_list:
if 'rue' not in r and 'alse' not in r:
int_list.append(int(r))
cutoff_dict['battery_voltage'] = [int_list[0],int_list[1]]
cutoff_dict['current'] = [int_list[2],int_list[3]]
cutoff_dict['frequency'] = [int_list[4],int_list[5]]
cutoff_dict['humidity'] = [int_list[6],int_list[7]]
cutoff_dict['temperature'] = [int_list[8],int_list[9]]
cutoff_dict['voltage'] = [int_list[10],int_list[11]]
sleep(60)
print('Initialized')
# start threads
thread.start_new_thread(logger, ())
thread.start_new_thread(cloud_logger, ())
thread.start_new_thread(button_interrupt, ())
thread.start_new_thread(commander, ())
thread.start_new_thread(value_update, ())
# frequency_update is a threading.Thread subclass; instances of it are created
# and started inside value_update(), so it is not spawned here.
print('Threads Started')
# for when being run by cron job
while True:
sleep(60)
raw_input("Press Enter to kill program\n")
PWM.cleanup()
lcd.clear()
print('Done')
|
|
"""
Run many trilegal simulations and cull scaled LF functions to compare with data
"""
import argparse
import logging
import numpy as np
import os
import sys
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pylab as plt
import ResolvedStellarPops as rsp
from IPython import parallel
from ..pop_synth.stellar_pops import normalize_simulation, rgb_agb_regions
from ..plotting.plotting import model_cmd_withasts
from star_formation_histories import StarFormationHistories
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
__all__ = ['VarySFHs', 'run_once']
def initialize_inputs():
return {'ast_corr': True,
'cmd_input_file': 'cmd_input_parsecCAF09_V1.2S_M36_S12D2_MAR13.dat',
'extra_str': '',
'file_origin': None,
'filter1': None,
'filter2': None,
'galaxy_input': None,
'bins': None,
'trgb': None,
'Mtrgb': None,
'offset': -2.,
'outfile_loc': os.getcwd(),
'photsys': None,
'sfh_file': None,
'target': None,
'trgb_exclude': .1}
def load_lf_file(lf_file):
with open(lf_file, 'r') as lff:
lines = [l.strip() for l in lff.readlines() if not l.startswith('#')]
hists = [np.array(l.split(), dtype=float) for l in lines[0::2]]
binss = [np.array(l.split(), dtype=float) for l in lines[1::2]]
return hists, binss
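# A short sketch of reading a LF file back with load_lf_file and plotting the
# first realization; the file name is a placeholder assumption, and whether
# the bins line stores edges or centers depends on how the file was written.
def _plot_first_lf(lf_file='target_filter_lf.dat'):
    hists, binss = load_lf_file(lf_file)
    fig, ax = plt.subplots()
    # trim to the shorter of the two in case the bins line holds edges
    n = min(len(hists[0]), len(binss[0]))
    ax.step(binss[0][:n], hists[0][:n], where='mid')
    ax.set_xlabel('mag')
    ax.set_ylabel('N')
    return fig, ax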
class VarySFHs(StarFormationHistories):
'''
run several variations of the age sfr z from MATCH SFH to produce
simulated CMDs, LFs, and nagb/nrgb ratios.
because the trilegal output can be so huge, there are many constraints
needed to cull the model output down to what is necessary for the
analysis.
'''
def __init__(self, inp_obj=None, input_file=None, kwargs={}):
'''
galaxy_input is a template.
'''
# load SFH instance to make lots of trilegal runs
self.input_file = input_file
if input_file is not None:
kwargs.update(rsp.fileio.load_input(input_file))
        indict = dict(initialize_inputs().items() + kwargs.items())
if inp_obj is not None:
indict = inp_obj.__dict__
<<<<<<< HEAD
if inp_obj.nsfhs > 1:
StarFormationHistories.__init__(self, inp_obj.hmc_file,
inp_obj.file_origin)
[self.__setattr__(k, v) for k, v in indict.items()]
cmd_input_file = os.path.split(self.cmd_input_file)[1]
self.agb_mod = \
cmd_input_file.replace('.dat', '').lower().replace('cmd_input_', '')
=======
default_kwargs = dict({'agb_mod': None,
'Av': 0.,
'cmd_input_file': None,
'dmod': 0.,
'extra_str': '',
'file_origin': None,
'opt_filter1': None,
'opt_filter2': None,
'ir_filter1': None,
'ir_filter2': None,
'galaxy_input': None,
'ir_bins': None,
'ir_color_min': -99.,
'ir_trgb': None,
'just_once': False,
'Mtrgb': None,
'nir_rgb': None,
'nopt_rgb': None,
'nsfhs': None,
'offsets': [-2., -1.5],
'opt_bins': None,
'opt_color_min': -99.,
'opt_trgb': None,
'outfile_loc': os.getcwd(),
'photsys': None,
'sfh_file': None,
'target': None,
'trgb_excludes': [.1, .2]}.items()
+ kwargs.items())
[self.__setattr__(k, v) for k, v in default_kwargs.items()]
if not self.just_once:
StarFormationHistories.__init__(self, self.sfh_file,
self.file_origin)
if self.agb_mod is None:
self.agb_mod = \
os.path.split(self.cmd_input_file)[1].replace('.dat', '').lower()
if None in [self.ir_trgb, self.opt_trgb]:
if self.Mtrgb is not None:
self.ir_trgb = rsp.astronomy_utils.Mag2mag(self.Mtrgb,
self.ir_filter2,
self.photsys,
dmod=self.dmod,
Av=self.Av)
self.opt_trgb = rsp.astronomy_utils.Mag2mag(self.Mtrgb,
self.opt_filter2,
self.photsys,
dmod=self.dmod,
Av=self.Av)
def prepare_outfiles(self):
# setup the locations all the files to write and read from
self.fnames = setup_files(self.agb_mod, self.target, self.outfile_loc,
extra_str=self.extra_str)
# header files are needed in two cases
# nagb/nrgb ratio file
self.narratio_header = '# target nopt_rgb nopt_agb nir_rgb nir_agb '
self.narratio_header += 'opt_ar_ratio ir_ar_ratio opt_ar_ratio_err '
self.narratio_header += 'ir_ar_ratio_err \n'
# contamination of phases in rgb and agb region file
# (changing the self.regions will disrupt calculation of
# rheb_eagb_contamination -- see contamination_by_phases code)
self.regions = ['MS', 'RGB', 'HEB', 'BHEB', 'RHEB', 'EAGB', 'TPAGB']
self.contam_header = '# %s %s \n' % (' '.join(self.regions),'Total')
>>>>>>> master
def prepare_galaxy_input(self, object_mass=None, dry_run=False):
'''
write the galaxy input file from a previously written template.
simply overwrites the filename line to link to the new sfr
file.
'''
self.galaxy_inputs = []
galaxy_input = self.galaxy_input
ext = '.' + galaxy_input.split('.')[-1]
lines = open(galaxy_input).readlines()
# line that links to sfr file.
extra = ' '.join(lines[-3].split(' ')[1:])
if object_mass is not None:
extra2 = ' '.join(lines[-6].split()[1:])
for i in range(len(self.sfr_files)):
lines[-3] = ' '.join([self.sfr_files[i], extra])
if object_mass is not None:
lines[-6] = ' '.join(['%.4e' % object_mass, extra2]) + '\n'
new_name = os.path.split(galaxy_input)[1].replace(ext, '_%003i' % i + ext)
new_out = os.path.join(self.outfile_loc, new_name)
if dry_run is False:
with open(new_out, 'w') as f:
f.write(''.join(lines))
logger.info('wrote {}'.format(new_out))
self.galaxy_inputs.append(new_out)
def vary_the_SFH(self, random_sfr=True, random_z=False,
zdisp=True, dry_run=False, object_mass=None):
'''make the sfhs, make the galaxy inputs'''
new_fmt = '{}{}_tri_%003i.sfr'.format(self.target, self.filter1)
outfile_fmt = os.path.join(self.outfile_loc, new_fmt)
self.sfr_files = self.make_many_trilegal_sfhs(nsfhs=self.nsfhs,
outfile_fmt=outfile_fmt,
random_sfr=random_sfr,
random_z=random_z,
zdisp=zdisp,
dry_run=dry_run)
self.prepare_galaxy_input(dry_run=dry_run, object_mass=object_mass)
return
def run(self, do_norm=True, dry_run=False, is_parallel=False, do_norm_kw={}):
"""call run_once and write results if normalization happened"""
do_norm_kw = dict(self.__dict__.items() + do_norm_kw.items())
try:
del do_norm_kw['sgal']
except:
pass
result = run_once(do_norm=do_norm, dry_run=dry_run,
do_norm_kw=do_norm_kw)
self.ast_corr = False
if result[1] is True:
self.ast_corr = True
final_result = result[0]
if not is_parallel:
if do_norm:
filter2 = self.filter2
filter1 = self.filter1
if self.ast_corr:
filter2 = '%s_cor' % self.filter2
filter1 = '%s_cor' % self.filter1
fdict = write_results(result[0], self.agb_mod, self.target,
self.outfile_loc, filter2, filter1,
extra_str=self.extra_str)
[self.__setattr__(k, v) for k, v in fdict.items()]
final_result = self.__dict__
return final_result
def run_parallel(self, do_norm=True, dry_run=False, max_proc=8, start=30,
timeout=45, cleanup=False):
"""
        Call self.run in parallel, or, if self.nsfhs <= 1, run once and skip
        the parallel machinery.
"""
def setup_parallel():
"""
I would love a better way to do this.
"""
clients = parallel.Client()
clients.block = False
clients[:].use_dill()
clients[:].execute('import ResolvedStellarPops as rsp')
clients[:].execute('import numpy as np')
clients[:].execute('import os')
clients[:].execute('import matplotlib.pylab as plt')
clients[:].execute('import logging')
clients[:]['do_normalization'] = do_normalization
clients[:]['load_trilegal_catalog'] = load_trilegal_catalog
clients[:]['run_once'] = run_once
clients[:]['normalize_simulation'] = normalize_simulation
clients[:]['rgb_agb_regions'] = rgb_agb_regions
clients[:]['contamination_by_phases'] = contamination_by_phases
clients[:]['gather_results'] = gather_results
clients[:]['write_results'] = write_results
clients[:]['model_cmd_withasts'] = model_cmd_withasts
clients[:]['logger'] = logger
return clients
# trilegal output format
tname = os.path.join(self.outfile_loc,
'output_%s_%s_%s_%s' % (self.target, self.filter1,
self.filter2, self.agb_mod))
triout_fmt = tname + '_%003i.dat'
if self.nsfhs <= 1:
# don't run parallel.
do_norm_kw = {'galaxy_input': self.galaxy_input,
'triout': tname + '_bestsfr.dat'}
self.run(do_norm=do_norm, dry_run=dry_run, do_norm_kw=do_norm_kw)
out_obj = rsp.fileio.InputParameters(default_dict=initialize_inputs())
out_obj.add_params(self.__dict__)
out_obj.write_params(self.input_file.replace('.vsfhinp', '.plotinp'))
return
# check for clusters.
try:
clients = parallel.Client()
except IOError:
logger.debug('Starting ipcluster... waiting {} s for spin up'.format(start))
os.system('ipcluster start --n={} &'.format(max_proc))
time.sleep(start)
# make a new "input file" for use in plotting or as a log
outfile = os.path.join(self.outfile_loc,
os.path.split(self.input_file)[1])
outparam = rsp.fileio.replace_ext(outfile, '_inp_%003i.dat')
out_obj = rsp.fileio.InputParameters(default_dict=initialize_inputs())
out_obj.add_params(self.__dict__, loud=True)
# create the sfr and galaxy input files
self.vary_the_SFH(random_sfr=True, random_z=False,
zdisp=True, dry_run=dry_run, object_mass=None)
# find looping parameters. How many sets of calls to the max number of
# processors
niters = np.ceil(self.nsfhs / float(max_proc))
sets = np.arange(niters * max_proc, dtype=int).reshape(niters, max_proc)
# in case it takes more than 45 s to spin up clusters, set up as
# late as possible
clients = setup_parallel()
logger.debug('ready to go!')
for j, iset in enumerate(sets):
# don't use not needed procs
iset = iset[iset < self.nsfhs]
# parallel call to run
do_norm_kws = [{'galaxy_input': self.galaxy_inputs[i],
'triout': triout_fmt % i} for i in iset]
res = [clients[i].apply(self.run, do_norm, dry_run, True, do_norm_kws[i],)
for i in range(len(iset))]
logger.debug('waiting on set {} of {}'.format(j, niters))
while False in [r.ready() for r in res]:
time.sleep(1)
logger.debug('set {} complete'.format(j))
<<<<<<< HEAD
if do_norm:
for r in res:
logging.info(r.result)
filter2 = self.filter2
filter1 = self.filter1
if self.ast_corr:
filter2 = '{}_cor'.format(self.filter2)
filter1 = '{}_cor'.format(self.filter1)
fdict = write_results(r.result, self.agb_mod, self.target,
self.outfile_loc, filter2, filter1,
extra_str=self.extra_str)
r.result.update(fdict)
# to eliminate clutter
if cleanup:
for i in iset:
if i != self.nsfhs - 1:
if os.path.isfile(triout_fmt % i):
os.remove(triout_fmt % i)
# write the new "input file"
for i in range(len(res)):
out_obj.add_params(res[i].result)
out_obj.write_params(outparam % i, loud=True)
#os.system('ipcluster stop')
def contamination_by_phases(sgal, srgb, sagb, filter2, diag_plot=False,
color_cut=None, target='', line=''):
"""
contamination by other phases than rgb and agb
"""
regions = ['MS', 'RGB', 'HEB', 'BHEB', 'RHEB', 'EAGB', 'TPAGB']
if line == '':
line += '# %s %s \n' % (' '.join(regions), 'Total')
sgal.all_stages()
indss = [sgal.__getattribute__('i%s' % r.lower()) for r in regions]
try:
if np.sum(indss) == 0:
msg = 'No stages in StarPop. Run trilegal with -l flag'
logger.warning(msg)
return '{}\n'.format(msg)
except:
pass
if diag_plot is True:
fig, ax = plt.subplots()
if color_cut is None:
inds = np.arange(len(sgal.data[filter2]))
else:
inds = color_cut
mag = sgal.data[filter2][inds]
ncontam_rgb = [list(set(s) & set(inds) & set(srgb)) for s in indss]
ncontam_agb = [list(set(s) & set(inds) & set(sagb)) for s in indss]
rheb_eagb_contam = len(ncontam_rgb[4]) + len(ncontam_rgb[5])
frac_rheb_eagb = float(rheb_eagb_contam) / \
float(np.sum([len(n) for n in ncontam_rgb]))
heb_rgb_contam = len(ncontam_rgb[2])
frac_heb_rgb_contam = float(heb_rgb_contam) / \
float(np.sum([len(n) for n in ncontam_rgb]))
mags = [mag[n] if len(n) > 0 else np.zeros(10) for n in ncontam_rgb]
mms = np.concatenate(mags)
ms, = np.nonzero(mms > 0)
bins = np.linspace(np.min(mms[ms]), np.max(mms[ms]), 10)
if diag_plot is True:
[ax.hist(mags, bins=bins, alpha=0.5, stacked=True,
label=regions)]
nrgb_cont = np.array([len(n) for n in ncontam_rgb], dtype=int)
nagb_cont = np.array([len(n) for n in ncontam_agb], dtype=int)
line += 'rgb %s %i \n' % ( ' '.join(map(str, nrgb_cont)), np.sum(nrgb_cont))
line += 'agb %s %i \n' % (' '.join(map(str, nagb_cont)), np.sum(nagb_cont))
line += '# rgb eagb contamination: %i \n' % rheb_eagb_contam
line += '# frac of total in rgb region: %.3f \n' % frac_rheb_eagb
line += '# rc contamination: %i \n' % heb_rgb_contam
line += '# frac of total in rgb region: %.3f \n' % frac_heb_rgb_contam
logger.info(line)
if diag_plot is True:
ax.legend(numpoints=1, loc=0)
ax.set_title(target)
plt.savefig('contamination_%s.png' % target, dpi=150)
return line
def do_normalization(filter1=None, filter2=None, sgal=None, triout=None,
offset=None, trgb_exclude=None, trgb=None, ast_corr=False,
col_min=None, col_max=None, nrgbs=None, **kwargs):
'''Do the normalization and save small part of outputs.'''
if sgal is None:
assert triout is not None, \
'need sgal loaded or pass trilegal catalog file name'
sgal = load_trilegal_catalog(triout, filter1, filter2)
if ast_corr:
if not filter1.endswith('cor'):
filter1 += '_cor'
if not filter2.endswith('cor'):
filter2 += '_cor'
# select rgb and agb regions
mag_bright = kwargs.get('mag_bright')
mag_faint = kwargs.get('mag_faint')
srgb, sagb = rgb_agb_regions(offset, trgb_exclude, trgb, sgal.data[filter2],
col_min=col_min, col_max=col_max,
mag_bright=mag_bright, mag_faint=mag_faint,
mag1=sgal.data[filter1])
# normalization
norm, inorm, rgb, agb = normalize_simulation(sgal.data[filter2], nrgbs,
srgb, sagb)
return sgal, norm, inorm, (srgb, sagb), (rgb, agb)
def run_once(cmd_input_file=None, galaxy_input=None, triout=None, rmfiles=False,
dry_run=False, do_norm=True, do_norm_kw={},
cols=None, filter1=None, filter2=None, target=None,
fake_file=None):
cmd_input_file = do_norm_kw.get('cmd_input_file', cmd_input_file)
galaxy_input = do_norm_kw.get('galaxy_input', galaxy_input)
filter1 = do_norm_kw.get('filter1', filter1)
filter2 = do_norm_kw.get('filter2', filter2)
target = do_norm_kw.get('target', target)
fake_file = do_norm_kw.get('fake_file', fake_file)
ast_corr = do_norm_kw.get('ast_corr', False)
diag_plot = do_norm_kw.get('diag_plot', False)
triout = do_norm_kw.get('triout', triout)
rsp.trilegal.utils.run_trilegal(cmd_input_file, galaxy_input, triout,
rmfiles=rmfiles, dry_run=dry_run)
if ast_corr is True and dry_run is False:
assert fake_file is not None, 'Need fake file for ast corrections'
logger.info('adding ast corrections to {}'.format(triout))
sgal = load_trilegal_catalog(triout, filter1, filter2, only_keys=None)
rsp.ast_correct_starpop(sgal, overwrite=True, outfile=triout,
fake_file=fake_file, diag_plot=False)
do_norm_kw['sgal'] = sgal
if do_norm:
do_norm_kw['triout'] = triout
sgal, norm, inorm, (srgb, sagb), (rgb, agb) = \
do_normalization(**do_norm_kw)
if ast_corr:
filter1 = '%s_cor' % filter1
filter2 = '%s_cor' % filter2
result_dict = gather_results(sgal, norm, target, filter1, filter2,
narratio_dict={'rgb': rgb, 'agb': agb,
'srgb': srgb, 'sagb': sagb,
'inorm': inorm})
result_dict['contam_line'] = contamination_by_phases(sgal, srgb, sagb,
filter2)
#if diag_plot:
#model_cmd_withasts(sgal, rgb=rgb, agb=agb, inorm=inorm,
# **do_norm_kw)
#import pdb; pdb.set_trace()
return result_dict, ast_corr
def load_trilegal_catalog(trilegal_output, filter1, filter2,
only_keys='default'):
'''read the trilegal cat.'''
if only_keys == 'default':
only_keys = ['logAge','MH','m_ini','logL','logTe','mM0','Av','mbol',
'Mact', 'stage', filter2, filter1, '%s_cor' % filter1,
'%s_cor' % filter2, 'CO']
elif type(only_keys) != list:
only_keys = None
sgal = rsp.SimGalaxy(trilegal_output, filter1=filter1, filter2=filter2,
only_keys=only_keys)
return sgal
def gather_results(sgal, norm, target, filter1, filter2,
mass_met=True, tpagb_lf=True, narratio_dict=None):
'''gather results into strings or lists of strings for writing.'''
result_dict = {}
if tpagb_lf:
result_dict['lf_line'] = '# norm mag2 mag1 rgb agb srgb sagb inorm\n' + \
'\n'.join(['%.4f' % norm,
' '.join(['%g' % m for m in sgal.data[filter2]]),
' '.join(['%g' % m for m in sgal.data[filter1]]),
' '.join(['%i' % m for m in narratio_dict['rgb']]),
' '.join(['%i' % m for m in narratio_dict['agb']]),
' '.join(['%i' % m for m in narratio_dict['srgb']]),
' '.join(['%i' % m for m in narratio_dict['sagb']]),
' '.join(['%i' % m for m in narratio_dict['inorm']])])
if mass_met is True:
sgal.all_stages('TPAGB')
inds = sgal.itpagb
mag = sgal.mag2
key = 'mass_met_line'
mass = sgal.data.m_ini[inds]
mag = mag[inds]
mh = sgal.data.MH[inds]
result_dict[key] = \
'\n'.join([' '.join(['%g' % t for t in mag]),
' '.join(['%g' % t for t in mass]),
' '.join(['%.3f' % t for t in mh])])
# N agb/rgb ratio file
if len(narratio_dict) > 0:
narratio_fmt = '%(target)s %(nrgb)i %(nagb)i '
narratio_fmt += '%(ar_ratio).3f %(ar_ratio_err).3f'
rgb = narratio_dict['rgb']
agb = narratio_dict['agb']
nrgb = float(len(rgb))
nagb = float(len(agb))
out_dict = {'target': target,
'ar_ratio': nagb / nrgb,
'ar_ratio_err': rsp.utils.count_uncert_ratio(nagb, nrgb),
'nrgb': nrgb,
'nagb': nagb}
result_dict['narratio_line'] = narratio_fmt % out_dict
return result_dict
def write_results(res_dict, agb_mod, target, outfile_loc, filter2, filter1,
extra_str=''):
'''
Write results of VSFH output dict to files.
=======
def write_truncated_file(self, triout, cols=['F110W', 'F160W']):
def load_model(fname, cols=['F110W', 'F160W']):
# all the columns
with open(fname, 'r') as f:
col_keys = f.readline().strip().split()
# the columns I want
usecols = [col_keys.index(c) for c in cols]
data = np.genfromtxt(fname, usecols=usecols, names=cols)
return data
def write_model(fname, data):
header = '# %s \n' % ' '.join(data.dtype.names)
with open(fname, 'w') as f:
f.write(header)
np.savetxt(f, data, fmt='%.4f')
write_model(triout, load_model(triout, cols=cols))
def write_results(self, res_dict):
'''writes out the results to self.fnames (see __init__)'''
>>>>>>> master
    Parameters
----------
res_dict : dict
output of run_once keys with %s_line will be written to a file
agb_mod, target, filter2, extra_str : strings
        file name formatting strings
outfile_loc : string
path to write output file
<<<<<<< HEAD
Returns
-------
fdict : dictionary
file and path to file
ex: lf_file: <path_to_lf_file>
'''
fmt = '%s_%s_%s_%s_%s%s.dat'
narratio_header = '# target nrgb nagb ar_ratio ar_ratio_err \n'
fdict = {}
for key, line in res_dict.items():
name = key.replace('_line', '')
fname = (fmt % (agb_mod, target, filter1, filter2, name, extra_str)).lower()
fname = os.path.join(outfile_loc, fname)
with open(fname, 'a') as fh:
if 'narratio' in key:
fh.write(narratio_header)
if type(line) == str:
line = [line]
[fh.write('%s \n' % l) for l in line]
fdict['%s_file' % name] = fname
return fdict
def main(argv):
"""
call vsfh.run_parallel with command line options and set up logger.
"""
parser = argparse.ArgumentParser(description="Run trilegal many times by \
                                     randomly sampling SFH uncertainties")
parser.add_argument('-d', '--dry_run', action='store_true',
help='do not call trilegal')
parser.add_argument('-v', '--pdb', action='store_true',
help='debugging mode')
parser.add_argument('-n', '--nproc', type=int, default=8,
help='number of processors')
parser.add_argument('-c', '--cleanup', action='store_true',
help='remove large files when finished')
parser.add_argument('name', type=str, help='input file')
args = parser.parse_args(argv)
# set up logging
handler = logging.FileHandler('{}_vary_sfh.log'.format(args.name))
if args.pdb:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
inp_obj = rsp.fileio.InputParameters(default_dict=initialize_inputs())
inp_obj.input_file = args.name
inp_obj.add_params(rsp.fileio.load_input(inp_obj.input_file), loud=args.pdb)
vsh = VarySFHs(inp_obj=inp_obj)
if args.pdb:
import pdb
        pdb.runcall(vsh.run_parallel, dry_run=args.dry_run, max_proc=args.nproc,
                    cleanup=args.cleanup)
else:
vsh.run_parallel(dry_run=args.dry_run, max_proc=args.nproc,
cleanup=args.cleanup)
if __name__ == '__main__':
main(sys.argv[1:])
=======
def load_lf_file(self, lf_file):
return load_lf_file(lf_file)
def contamination_by_phases(self, sopt_rgb, sopt_agb, sir_rgb, sir_agb,
diag_plot=False):
self.sgal.all_stages()
indss = [self.sgal.__getattribute__('i%s' % r.lower())
for r in self.regions]
line = ''
contam_line = []
if diag_plot is True:
fig, (axs) = plt.subplots(ncols=2)
for i, (rgb, agb, inds) in enumerate(zip([sopt_rgb, sir_rgb],
[sopt_agb, sir_agb],
[self.opt_color_cut,
self.ir_color_cut])):
if i == 1:
band = 'ir'
mag = self.sgal.data[self.ir_filter2][inds]
else:
band = 'opt'
mag = self.sgal.data[self.opt_filter2][inds]
ncontam_rgb = [list(set(s) & set(inds) & set(rgb)) for s in indss]
ncontam_agb = [list(set(s) & set(inds) & set(agb)) for s in indss]
rheb_eagb_contam = len(ncontam_rgb[4]) + len(ncontam_rgb[5])
frac_rheb_eagb = float(rheb_eagb_contam) / \
float(np.sum([len(n) for n in ncontam_rgb]))
heb_rgb_contam = len(ncontam_rgb[2])
frac_heb_rgb_contam = float(heb_rgb_contam) / \
float(np.sum([len(n) for n in ncontam_rgb]))
mags = [mag[n] if len(n) > 0 else np.zeros(10) for n in ncontam_rgb]
mms = np.concatenate(mags)
ms, = np.nonzero(mms > 0)
bins = np.linspace(np.min(mms[ms]), np.max(mms[ms]), 10)
if diag_plot is True:
[axs[i].hist(mags, bins=bins, alpha=0.5, stacked=True,
label=self.regions)]
nrgb_cont = np.array([len(n) for n in ncontam_rgb], dtype=int)
nagb_cont = np.array([len(n) for n in ncontam_agb], dtype=int)
line += 'rgb %s %s %i \n' % (band, ' '.join(map(str, nrgb_cont)),
np.sum(nrgb_cont))
line += 'agb %s %s %i \n' % (band, ' '.join(map(str, nagb_cont)),
np.sum(nagb_cont))
line += '# rgb eagb contamination: %i \n' % rheb_eagb_contam
line += '# frac of total in rgb region: %.3f \n' % frac_rheb_eagb
line += '# rc contamination: %i \n' % heb_rgb_contam
line += '# frac of total in rgb region: %.3f \n' % \
frac_heb_rgb_contam
print line
contam_line.append(line)
if diag_plot is True:
axs[0].legend(numpoints=1, loc=0)
axs[0].set_title(self.target)
plt.savefig('contamination_%s.png' % self.target, dpi=150)
return line
def narratio_table(self):
narratio_files = rsp.fileIO.get_files(self.outfile_dir, '*narratio*dat')
stats.narratio_table(narratio_files)
return
def chi2_stats(targets, cmd_inputs, outfile_dir='default', extra_str=''):
chi2_files = stats.write_chi2_table(targets, cmd_inputs,
outfile_loc=outfile_dir,
extra_str=extra_str)
chi2_dicts = stats.result2dict(chi2_files)
stats.chi2plot(chi2_dicts, outfile_loc=outfile_dir)
chi2_files = stats.write_chi2_table(targets, cmd_inputs,
outfile_loc=outfile_dir,
extra_str=extra_str,
just_gauss=True)
return
>>>>>>> master
|
|
#! /usr/bin/env python
""" Convert values between RGB hex codes and xterm-256 color codes.
Nice long listing of all 256 colors and their codes. Useful for
developing console color themes, or even script output schemes.
Resources:
* http://en.wikipedia.org/wiki/8-bit_color
* http://en.wikipedia.org/wiki/ANSI_escape_code
* /usr/share/X11/rgb.txt
I'm not sure where this script was inspired from. I think I must have
written it from scratch, though it's been several years now.
"""
__author__ = 'Micah Elliott http://MicahElliott.com'
__version__ = '0.1'
__copyright__ = 'Copyright (C) 2011 Micah Elliott. All rights reserved.'
__license__ = 'WTFPL http://sam.zoy.org/wtfpl/'
#---------------------------------------------------------------------
import sys, re
CLUT = [ # color look-up table
# 8-bit, RGB hex
# Primary 3-bit (8 colors). Unique representation!
('00', '000000'),
('01', '800000'),
('02', '008000'),
('03', '808000'),
('04', '000080'),
('05', '800080'),
('06', '008080'),
('07', 'c0c0c0'),
# Equivalent "bright" versions of original 8 colors.
('08', '808080'),
('09', 'ff0000'),
('10', '00ff00'),
('11', 'ffff00'),
('12', '0000ff'),
('13', 'ff00ff'),
('14', '00ffff'),
('15', 'ffffff'),
# Strictly ascending.
('16', '000000'),
('17', '00005f'),
('18', '000087'),
('19', '0000af'),
('20', '0000d7'),
('21', '0000ff'),
('22', '005f00'),
('23', '005f5f'),
('24', '005f87'),
('25', '005faf'),
('26', '005fd7'),
('27', '005fff'),
('28', '008700'),
('29', '00875f'),
('30', '008787'),
('31', '0087af'),
('32', '0087d7'),
('33', '0087ff'),
('34', '00af00'),
('35', '00af5f'),
('36', '00af87'),
('37', '00afaf'),
('38', '00afd7'),
('39', '00afff'),
('40', '00d700'),
('41', '00d75f'),
('42', '00d787'),
('43', '00d7af'),
('44', '00d7d7'),
('45', '00d7ff'),
('46', '00ff00'),
('47', '00ff5f'),
('48', '00ff87'),
('49', '00ffaf'),
('50', '00ffd7'),
('51', '00ffff'),
('52', '5f0000'),
('53', '5f005f'),
('54', '5f0087'),
('55', '5f00af'),
('56', '5f00d7'),
('57', '5f00ff'),
('58', '5f5f00'),
('59', '5f5f5f'),
('60', '5f5f87'),
('61', '5f5faf'),
('62', '5f5fd7'),
('63', '5f5fff'),
('64', '5f8700'),
('65', '5f875f'),
('66', '5f8787'),
('67', '5f87af'),
('68', '5f87d7'),
('69', '5f87ff'),
('70', '5faf00'),
('71', '5faf5f'),
('72', '5faf87'),
('73', '5fafaf'),
('74', '5fafd7'),
('75', '5fafff'),
('76', '5fd700'),
('77', '5fd75f'),
('78', '5fd787'),
('79', '5fd7af'),
('80', '5fd7d7'),
('81', '5fd7ff'),
('82', '5fff00'),
('83', '5fff5f'),
('84', '5fff87'),
('85', '5fffaf'),
('86', '5fffd7'),
('87', '5fffff'),
('88', '870000'),
('89', '87005f'),
('90', '870087'),
('91', '8700af'),
('92', '8700d7'),
('93', '8700ff'),
('94', '875f00'),
('95', '875f5f'),
('96', '875f87'),
('97', '875faf'),
('98', '875fd7'),
('99', '875fff'),
('100', '878700'),
('101', '87875f'),
('102', '878787'),
('103', '8787af'),
('104', '8787d7'),
('105', '8787ff'),
('106', '87af00'),
('107', '87af5f'),
('108', '87af87'),
('109', '87afaf'),
('110', '87afd7'),
('111', '87afff'),
('112', '87d700'),
('113', '87d75f'),
('114', '87d787'),
('115', '87d7af'),
('116', '87d7d7'),
('117', '87d7ff'),
('118', '87ff00'),
('119', '87ff5f'),
('120', '87ff87'),
('121', '87ffaf'),
('122', '87ffd7'),
('123', '87ffff'),
('124', 'af0000'),
('125', 'af005f'),
('126', 'af0087'),
('127', 'af00af'),
('128', 'af00d7'),
('129', 'af00ff'),
('130', 'af5f00'),
('131', 'af5f5f'),
('132', 'af5f87'),
('133', 'af5faf'),
('134', 'af5fd7'),
('135', 'af5fff'),
('136', 'af8700'),
('137', 'af875f'),
('138', 'af8787'),
('139', 'af87af'),
('140', 'af87d7'),
('141', 'af87ff'),
('142', 'afaf00'),
('143', 'afaf5f'),
('144', 'afaf87'),
('145', 'afafaf'),
('146', 'afafd7'),
('147', 'afafff'),
('148', 'afd700'),
('149', 'afd75f'),
('150', 'afd787'),
('151', 'afd7af'),
('152', 'afd7d7'),
('153', 'afd7ff'),
('154', 'afff00'),
('155', 'afff5f'),
('156', 'afff87'),
('157', 'afffaf'),
('158', 'afffd7'),
('159', 'afffff'),
('160', 'd70000'),
('161', 'd7005f'),
('162', 'd70087'),
('163', 'd700af'),
('164', 'd700d7'),
('165', 'd700ff'),
('166', 'd75f00'),
('167', 'd75f5f'),
('168', 'd75f87'),
('169', 'd75faf'),
('170', 'd75fd7'),
('171', 'd75fff'),
('172', 'd78700'),
('173', 'd7875f'),
('174', 'd78787'),
('175', 'd787af'),
('176', 'd787d7'),
('177', 'd787ff'),
('178', 'd7af00'),
('179', 'd7af5f'),
('180', 'd7af87'),
('181', 'd7afaf'),
('182', 'd7afd7'),
('183', 'd7afff'),
('184', 'd7d700'),
('185', 'd7d75f'),
('186', 'd7d787'),
('187', 'd7d7af'),
('188', 'd7d7d7'),
('189', 'd7d7ff'),
('190', 'd7ff00'),
('191', 'd7ff5f'),
('192', 'd7ff87'),
('193', 'd7ffaf'),
('194', 'd7ffd7'),
('195', 'd7ffff'),
('196', 'ff0000'),
('197', 'ff005f'),
('198', 'ff0087'),
('199', 'ff00af'),
('200', 'ff00d7'),
('201', 'ff00ff'),
('202', 'ff5f00'),
('203', 'ff5f5f'),
('204', 'ff5f87'),
('205', 'ff5faf'),
('206', 'ff5fd7'),
('207', 'ff5fff'),
('208', 'ff8700'),
('209', 'ff875f'),
('210', 'ff8787'),
('211', 'ff87af'),
('212', 'ff87d7'),
('213', 'ff87ff'),
('214', 'ffaf00'),
('215', 'ffaf5f'),
('216', 'ffaf87'),
('217', 'ffafaf'),
('218', 'ffafd7'),
('219', 'ffafff'),
('220', 'ffd700'),
('221', 'ffd75f'),
('222', 'ffd787'),
('223', 'ffd7af'),
('224', 'ffd7d7'),
('225', 'ffd7ff'),
('226', 'ffff00'),
('227', 'ffff5f'),
('228', 'ffff87'),
('229', 'ffffaf'),
('230', 'ffffd7'),
('231', 'ffffff'),
# Gray-scale range.
('232', '080808'),
('233', '121212'),
('234', '1c1c1c'),
('235', '262626'),
('236', '303030'),
('237', '3a3a3a'),
('238', '444444'),
('239', '4e4e4e'),
('240', '585858'),
('241', '626262'),
('242', '6c6c6c'),
('243', '767676'),
('244', '808080'),
('245', '8a8a8a'),
('246', '949494'),
('247', '9e9e9e'),
('248', 'a8a8a8'),
('249', 'b2b2b2'),
('250', 'bcbcbc'),
('251', 'c6c6c6'),
('252', 'd0d0d0'),
('253', 'dadada'),
('254', 'e4e4e4'),
('255', 'eeeeee'),
]
def _str2hex(hexstr):
return int(hexstr, 16)
def _strip_hash(rgb):
# Strip leading `#` if exists.
if rgb.startswith('#'):
rgb = rgb.lstrip('#')
return rgb
def _create_dicts():
short2rgb_dict = dict(CLUT)
rgb2short_dict = {}
for k, v in short2rgb_dict.items():
rgb2short_dict[v] = k
return rgb2short_dict, short2rgb_dict
def short2rgb(short):
return SHORT2RGB_DICT[short]
def print_all():
""" Print all 256 xterm color codes.
"""
for short, rgb in CLUT:
sys.stdout.write('\033[48;5;%sm%s:%s' % (short, short, rgb))
sys.stdout.write("\033[0m ")
sys.stdout.write('\033[38;5;%sm%s:%s' % (short, short, rgb))
sys.stdout.write("\033[0m\n")
print "Printed all codes."
print "You can translate a hex or 0-255 code by providing an argument."
def rgb2short(rgb):
""" Find the closest xterm-256 approximation to the given RGB value.
@param rgb: Hex code representing an RGB value, eg, 'abcdef'
@returns: String between 0 and 255, compatible with xterm.
>>> rgb2short('123456')
('23', '005f5f')
>>> rgb2short('ffffff')
('231', 'ffffff')
>>> rgb2short('0DADD6') # vimeo logo
('38', '00afd7')
"""
rgb = _strip_hash(rgb)
incs = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
# Break 6-char RGB code into 3 integer vals.
parts = [ int(h, 16) for h in re.split(r'(..)(..)(..)', rgb)[1:4] ]
res = []
for part in parts:
i = 0
while i < len(incs)-1:
s, b = incs[i], incs[i+1] # smaller, bigger
if s <= part <= b:
s1 = abs(s - part)
b1 = abs(b - part)
if s1 < b1: closest = s
else: closest = b
res.append(closest)
break
i += 1
#print '***', res
res = ''.join([ ('%02.x' % i) for i in res ])
equiv = RGB2SHORT_DICT[ res ]
#print '***', res, equiv
return equiv, res
RGB2SHORT_DICT, SHORT2RGB_DICT = _create_dicts()
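# A small sketch of using rgb2short from another script to preview a console
# color theme; the hex values below are arbitrary examples, and the escape
# sequence matches the one used by print_all() above.
def preview_theme(hex_codes=('0DADD6', '800000', 'afd787')):
    for code in hex_codes:
        short, approx = rgb2short(code)
        sys.stdout.write('\033[38;5;%sm#%s -> %s (%s)\033[0m\n'
                         % (short, code, short, approx))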
#---------------------------------------------------------------------
if __name__ == '__main__':
import doctest
doctest.testmod()
if len(sys.argv) == 1:
print_all()
raise SystemExit
arg = sys.argv[1]
if len(arg) < 4 and int(arg) < 256:
rgb = short2rgb(arg)
sys.stdout.write('xterm color \033[38;5;%sm%s\033[0m -> RGB exact \033[38;5;%sm%s\033[0m' % (arg, arg, arg, rgb))
sys.stdout.write("\033[0m\n")
else:
short, rgb = rgb2short(arg)
sys.stdout.write('RGB %s -> xterm color approx \033[38;5;%sm%s (%s)' % (arg, short, short, rgb))
sys.stdout.write("\033[0m\n")
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Service providing handler
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.8
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 8)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Pelix beans
from pelix.constants import BundleActivator, BundleException
# iPOPO constants
import pelix.ipopo.constants as ipopo_constants
import pelix.ipopo.handlers.constants as constants
# Standard library
import logging
# ------------------------------------------------------------------------------
class _HandlerFactory(constants.HandlerFactory):
"""
Factory service for service registration handlers
"""
def get_handlers(self, component_context, instance):
"""
Sets up service providers for the given component
:param component_context: The ComponentContext bean
:param instance: The component instance
:return: The list of handlers associated to the given component
"""
# Retrieve the handler configuration
provides = component_context.get_handler(
ipopo_constants.HANDLER_PROVIDES)
if not provides:
# Nothing to do
return tuple()
# 1 handler per provided service
return [ServiceRegistrationHandler(specs, controller)
for specs, controller in provides]
@BundleActivator
class _Activator(object):
"""
The bundle activator
"""
def __init__(self):
"""
Sets up members
"""
self._registration = None
def start(self, context):
"""
Bundle started
"""
# Set up properties
properties = {constants.PROP_HANDLER_ID:
ipopo_constants.HANDLER_PROVIDES}
# Register the handler factory service
self._registration = context.register_service(
constants.SERVICE_IPOPO_HANDLER_FACTORY,
_HandlerFactory(), properties)
def stop(self, context):
"""
Bundle stopped
"""
# Unregister the service
self._registration.unregister()
self._registration = None
# ------------------------------------------------------------------------------
class ServiceRegistrationHandler(constants.ServiceProviderHandler):
"""
Handles the registration of a service provided by a component
"""
def __init__(self, specifications, controller_name):
"""
Sets up the handler
:param specifications: The service specifications
:param controller_name: Name of the associated service controller
(can be None)
"""
self.specifications = specifications
self.__controller = controller_name
self._ipopo_instance = None
# Controller is "on" by default
self.__controller_on = True
self.__validated = False
# The ServiceRegistration and ServiceReference objects
self._registration = None
self._svc_reference = None
def _field_controller_generator(self):
"""
Generates the methods called by the injected controller
"""
# Local variable, to avoid messing with "self"
stored_instance = self._ipopo_instance
def get_value(self, name):
"""
Retrieves the controller value, from the iPOPO dictionaries
:param name: The property name
:return: The property value
"""
return stored_instance.get_controller_state(name)
def set_value(self, name, new_value):
"""
            Sets the property value and triggers an update event
:param name: The property name
:param new_value: The new property value
"""
# Get the previous value
old_value = stored_instance.get_controller_state(name)
if new_value != old_value:
# Update the controller state
stored_instance.set_controller_state(name, new_value)
return new_value
return get_value, set_value
def manipulate(self, stored_instance, component_instance):
"""
Manipulates the component instance
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance
"""
# Store the stored instance
self._ipopo_instance = stored_instance
if self.__controller is None:
# No controller: do nothing
return
# Get the current value of the member (True by default)
controller_value = getattr(component_instance, self.__controller, True)
# Store the controller value
stored_instance.set_controller_state(self.__controller,
controller_value)
# Prepare the methods names
getter_name = "{0}{1}" \
.format(ipopo_constants.IPOPO_CONTROLLER_PREFIX,
ipopo_constants.IPOPO_GETTER_SUFFIX)
setter_name = "{0}{1}" \
.format(ipopo_constants.IPOPO_CONTROLLER_PREFIX,
ipopo_constants.IPOPO_SETTER_SUFFIX)
# Inject the getter and setter at the instance level
getter, setter = self._field_controller_generator()
setattr(component_instance, getter_name, getter)
setattr(component_instance, setter_name, setter)
def check_event(self, svc_event):
"""
        Tests whether the given service event can be handled, i.e. whether it
        does not concern the service registered by this handler
        :param svc_event: A service event
        :return: True if the given event does not reference the provided
            service
"""
return self._svc_reference is not svc_event.get_service_reference()
def get_kinds(self):
"""
Retrieves the kinds of this handler: 'service_provider'
:return: the kinds of this handler
"""
return constants.KIND_SERVICE_PROVIDER,
def get_service_reference(self):
"""
Retrieves the reference of the provided service
:return: A ServiceReference object
"""
return self._svc_reference
def on_controller_change(self, name, value):
"""
Called by the instance manager when a controller value has been
modified
:param name: The name of the controller
:param value: The new value of the controller
"""
if self.__controller != name:
# Nothing to do
return
# Update the controller value
self.__controller_on = value
if value:
# Controller switched to "ON"
self._register_service()
else:
# Controller switched to "OFF"
self._unregister_service()
def on_property_change(self, name, old_value, new_value):
"""
Called by the instance manager when a component property is modified
:param name: The changed property name
:param old_value: The previous property value
:param new_value: The new property value
"""
if self._registration is not None:
# use the registration to trigger the service event
self._registration.set_properties({name: new_value})
def post_validate(self):
"""
Called by the instance manager once the component has been validated
"""
# Update the validation flag
self.__validated = True
self._register_service()
def pre_invalidate(self):
"""
Called by the instance manager before the component is invalidated
"""
# Update the validation flag
self.__validated = False
# Force service unregistration
self._unregister_service()
def _register_service(self):
"""
Registers the provided service, if possible
"""
if self._registration is None and self.specifications \
and self.__validated and self.__controller_on:
# Use a copy of component properties
properties = self._ipopo_instance.context.properties.copy()
bundle_context = self._ipopo_instance.bundle_context
# Register the service
self._registration = bundle_context.register_service(
self.specifications, self._ipopo_instance.instance, properties)
self._svc_reference = self._registration.get_reference()
def _unregister_service(self):
"""
Unregisters the provided service, if needed
"""
if self._registration is not None:
# Ignore error
try:
self._registration.unregister()
except BundleException as ex:
# Only log the error at this level
logger = logging.getLogger('-'.join((self._ipopo_instance.name,
'ServiceRegistration')))
logger.error("Error unregistering a service: %s", ex)
self._registration = None
self._svc_reference = None
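# A minimal sketch of a component whose registration would be managed by this
# handler: @Provides names the specification and a boolean controller field,
# so flipping self._controller registers or unregisters the service. The
# factory and specification names are illustrative assumptions; a real
# component would live in its own bundle rather than in this handler module.
from pelix.ipopo.decorators import ComponentFactory, Provides
@ComponentFactory("example-hello-factory")
@Provides("example.hello", controller="_controller")
class ExampleHelloProvider(object):
    def __init__(self):
        # Service stays registered while the controller is True
        self._controller = True
    def hello(self, name="world"):
        return "Hello, {0}!".format(name)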
|
|
'''
Entry point module (keep at root):
Used to run tests with unittest/pytest/nose.
'''
import os
try:
xrange
except:
xrange = range
def main():
import sys
# Separate the nose params and the pydev params.
pydev_params = []
other_test_framework_params = []
found_other_test_framework_param = None
NOSE_PARAMS = '--nose-params'
PY_TEST_PARAMS = '--py-test-params'
for arg in sys.argv[1:]:
if not found_other_test_framework_param and arg != NOSE_PARAMS and arg != PY_TEST_PARAMS:
pydev_params.append(arg)
else:
if not found_other_test_framework_param:
found_other_test_framework_param = arg
else:
other_test_framework_params.append(arg)
# Here we'll run either with nose or with the pydev_runfiles.
from _pydev_runfiles import pydev_runfiles
from _pydev_runfiles import pydev_runfiles_xml_rpc
from _pydevd_bundle import pydevd_constants
from pydevd_file_utils import _NormFile
DEBUG = 0
if DEBUG:
sys.stdout.write('Received parameters: %s\n' % (sys.argv,))
sys.stdout.write('Params for pydev: %s\n' % (pydev_params,))
if found_other_test_framework_param:
sys.stdout.write('Params for test framework: %s, %s\n' % (found_other_test_framework_param, other_test_framework_params))
try:
configuration = pydev_runfiles.parse_cmdline([sys.argv[0]] + pydev_params)
except:
sys.stderr.write('Command line received: %s\n' % (sys.argv,))
raise
pydev_runfiles_xml_rpc.initialize_server(configuration.port) # Note that if the port is None, a Null server will be initialized.
NOSE_FRAMEWORK = 1
PY_TEST_FRAMEWORK = 2
try:
if found_other_test_framework_param:
test_framework = 0 # Default (pydev)
if found_other_test_framework_param == NOSE_PARAMS:
import nose
test_framework = NOSE_FRAMEWORK
elif found_other_test_framework_param == PY_TEST_PARAMS:
import pytest
test_framework = PY_TEST_FRAMEWORK
else:
raise ImportError()
else:
raise ImportError()
except ImportError:
if found_other_test_framework_param:
sys.stderr.write('Warning: Could not import the test runner: %s. Running with the default pydev unittest runner instead.\n' % (
found_other_test_framework_param,))
test_framework = 0
# Clear any exception that may be there so that clients don't see it.
# See: https://sourceforge.net/tracker/?func=detail&aid=3408057&group_id=85796&atid=577329
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
if test_framework == 0:
return pydev_runfiles.main(configuration) # Note: still doesn't return a proper value.
else:
# We'll convert the parameters to what nose or py.test expects.
# The supported parameters are:
# runfiles.py --config-file|-t|--tests <Test.test1,Test2> dirs|files --nose-params xxx yyy zzz
# (all after --nose-params should be passed directly to nose)
# In java:
# --tests = Constants.ATTR_UNITTEST_TESTS
# --config-file = Constants.ATTR_UNITTEST_CONFIGURATION_FILE
# The only thing actually handled here are the tests that we want to run, which we'll
# handle and pass as what the test framework expects.
py_test_accept_filter = {}
files_to_tests = configuration.files_to_tests
if files_to_tests:
# Handling through the file contents (file where each line is a test)
files_or_dirs = []
for file, tests in files_to_tests.items():
if test_framework == NOSE_FRAMEWORK:
for test in tests:
files_or_dirs.append(file + ':' + test)
elif test_framework == PY_TEST_FRAMEWORK:
file = _NormFile(file)
py_test_accept_filter[file] = tests
files_or_dirs.append(file)
else:
raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
else:
if configuration.tests:
# Tests passed (works together with the files_or_dirs)
files_or_dirs = []
for file in configuration.files_or_dirs:
if test_framework == NOSE_FRAMEWORK:
for t in configuration.tests:
files_or_dirs.append(file + ':' + t)
elif test_framework == PY_TEST_FRAMEWORK:
file = _NormFile(file)
py_test_accept_filter[file] = configuration.tests
files_or_dirs.append(file)
else:
raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
else:
# Only files or dirs passed (let it do the test-loading based on those paths)
files_or_dirs = configuration.files_or_dirs
argv = other_test_framework_params + files_or_dirs
if test_framework == NOSE_FRAMEWORK:
# Nose usage: http://somethingaboutorange.com/mrl/projects/nose/0.11.2/usage.html
# show_stdout_option = ['-s']
# processes_option = ['--processes=2']
argv.insert(0, sys.argv[0])
if DEBUG:
sys.stdout.write('Final test framework args: %s\n' % (argv[1:],))
from _pydev_runfiles import pydev_runfiles_nose
PYDEV_NOSE_PLUGIN_SINGLETON = pydev_runfiles_nose.start_pydev_nose_plugin_singleton(configuration)
argv.append('--with-pydevplugin')
# Negate the result: nose.run() returns True on success, and the process should exit with code 0 on success.
return not nose.run(argv=argv, addplugins=[PYDEV_NOSE_PLUGIN_SINGLETON])
elif test_framework == PY_TEST_FRAMEWORK:
if DEBUG:
sys.stdout.write('Final test framework args: %s\n' % (argv,))
sys.stdout.write('py_test_accept_filter: %s\n' % (py_test_accept_filter,))
def dotted(p):
# Helper to convert path to have dots instead of slashes
return os.path.normpath(p).replace(os.sep, "/").replace('/', '.')
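# Illustrative sketch of what dotted() produces (hypothetical paths, assuming a
# POSIX separator):
#   dotted('pkg/tests')      -> 'pkg.tests'
#   dotted('/home/user/pkg') -> '.home.user.pkg'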
curr_dir = os.path.realpath('.')
curr_dotted = dotted(curr_dir) + '.'
# Overcome limitation on py.test:
# When searching conftest if we have a structure as:
# /my_package
# /my_package/conftest.py
# /my_package/tests
# /my_package/tests/test_my_package.py
# The test_my_package won't have access to the conftest contents from the
# test_my_package.py file unless the working dir is set to /my_package.
#
# See related issue (for which we work-around below):
# https://bitbucket.org/hpk42/pytest/issue/639/conftest-being-loaded-twice-giving
for path in sys.path:
path_dotted = dotted(path)
if curr_dotted.startswith(path_dotted):
os.chdir(path)
break
for i in xrange(len(argv)):
arg = argv[i]
# Workaround bug in py.test: if we pass the full path it ends up importing conftest
# more than once (so, always work with relative paths).
if os.path.isfile(arg) or os.path.isdir(arg):
from _pydev_bundle.pydev_imports import relpath
try:
# May fail if on different drives
arg = relpath(arg)
except ValueError:
pass
else:
argv[i] = arg
# To find our runfile helpers (i.e.: plugin)...
d = os.path.dirname(__file__)
if d not in sys.path:
sys.path.insert(0, d)
import pickle, zlib, base64
# Update environment PYTHONPATH so that it finds our plugin if using xdist.
os.environ['PYTHONPATH'] = os.pathsep.join(sys.path)
# Set what should be skipped in the plugin through an environment variable
s = base64.b64encode(zlib.compress(pickle.dumps(py_test_accept_filter)))
if pydevd_constants.IS_PY3K:
s = s.decode('ascii') # Must be str in py3.
os.environ['PYDEV_PYTEST_SKIP'] = s
# Identifies the main pid (i.e.: if it's not the main pid it has to connect back to the
# main pid to give xml-rpc notifications).
os.environ['PYDEV_MAIN_PID'] = str(os.getpid())
os.environ['PYDEV_PYTEST_SERVER'] = str(configuration.port)
argv.append('-p')
argv.append('_pydev_runfiles.pydev_runfiles_pytest2')
if 'unittest' in sys.modules or 'unittest2' in sys.modules:
sys.stderr.write('pydev test runner error: imported unittest before running pytest.main\n')
return pytest.main(argv)
else:
raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
if __name__ == '__main__':
try:
main()
finally:
try:
# The server is not a daemon thread, so, we have to ask for it to be killed!
from _pydev_runfiles import pydev_runfiles_xml_rpc
pydev_runfiles_xml_rpc.force_server_kill()
except:
pass # Ignore any errors here
import sys
import threading
if hasattr(sys, '_current_frames') and hasattr(threading, 'enumerate'):
import time
import traceback
class DumpThreads(threading.Thread):
def run(self):
time.sleep(10)
thread_id_to_name = {}
try:
for t in threading.enumerate():
thread_id_to_name[t.ident] = '%s (daemon: %s)' % (t.name, t.daemon)
except:
pass
stack_trace = [
'===============================================================================',
'pydev pyunit runner: Threads still found running after tests finished',
'================================= Thread Dump =================================']
for thread_id, stack in sys._current_frames().items():
stack_trace.append('\n-------------------------------------------------------------------------------')
stack_trace.append(" Thread %s" % thread_id_to_name.get(thread_id, thread_id))
stack_trace.append('')
if 'self' in stack.f_locals:
sys.stderr.write(str(stack.f_locals['self']) + '\n')
for filename, lineno, name, line in traceback.extract_stack(stack):
stack_trace.append(' File "%s", line %d, in %s' % (filename, lineno, name))
if line:
stack_trace.append(" %s" % (line.strip()))
stack_trace.append('\n=============================== END Thread Dump ===============================')
sys.stderr.write('\n'.join(stack_trace))
dump_current_frames_thread = DumpThreads()
dump_current_frames_thread.setDaemon(True) # Daemon so that this thread doesn't halt it!
dump_current_frames_thread.start()
|
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.transform.executor."""
import copy
import json
import os
import tempfile
from absl.testing import parameterized
import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform.beam import tft_unit
from tfx import types
from tfx.components.testdata.module_file import transform_module
from tfx.components.transform import executor
from tfx.dsl.io import fileio
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import name_utils
from tfx.utils import proto_utils
def _get_dataset_size(files):
if tf.executing_eagerly():
return sum(
1 for _ in tf.data.TFRecordDataset(files, compression_type='GZIP'))
else:
result = 0
for file in files:
result += sum(1 for _ in tf.compat.v1.io.tf_record_iterator(
file, tf.io.TFRecordOptions(compression_type='GZIP')))
return result
class _TempPath(types.Artifact):
TYPE_NAME = 'TempPath'
# TODO(b/122478841): Add more detailed tests.
class ExecutorTest(tft_unit.TransformTestCase):
_TEMP_EXAMPLE_DIR = tempfile.mkdtemp()
_SOURCE_DATA_DIR = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
_ARTIFACT1_URI = os.path.join(_TEMP_EXAMPLE_DIR, 'csv_example_gen1')
_ARTIFACT2_URI = os.path.join(_TEMP_EXAMPLE_DIR, 'csv_example_gen2')
# executor_v2_test.py overrides this to False.
def _use_force_tf_compat_v1(self):
return True
@classmethod
def setUpClass(cls):
super(ExecutorTest, cls).setUpClass()
source_example_dir = os.path.join(cls._SOURCE_DATA_DIR, 'csv_example_gen')
io_utils.copy_dir(source_example_dir, cls._ARTIFACT1_URI)
io_utils.copy_dir(source_example_dir, cls._ARTIFACT2_URI)
# Duplicate the train and eval records so that the second artifact has
# twice as many as the first.
artifact2_pattern = os.path.join(cls._ARTIFACT2_URI, '*', '*')
artifact2_files = fileio.glob(artifact2_pattern)
for filepath in artifact2_files:
directory, filename = os.path.split(filepath)
io_utils.copy_file(filepath, os.path.join(directory, 'dup_' + filename))
def _get_output_data_dir(self, sub_dir=None):
test_dir = self._testMethodName
if sub_dir is not None:
test_dir = os.path.join(test_dir, sub_dir)
return os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
test_dir)
def _make_base_do_params(self, source_data_dir, output_data_dir):
# Create input dict.
example1 = standard_artifacts.Examples()
example1.uri = self._ARTIFACT1_URI
example1.split_names = artifact_utils.encode_split_names(['train', 'eval'])
example2 = copy.deepcopy(example1)
example2.uri = self._ARTIFACT2_URI
self._example_artifacts = [example1, example2]
schema_artifact = standard_artifacts.Schema()
schema_artifact.uri = os.path.join(source_data_dir, 'schema_gen')
self._input_dict = {
standard_component_specs.EXAMPLES_KEY: self._example_artifacts[:1],
standard_component_specs.SCHEMA_KEY: [schema_artifact],
}
# Create output dict.
self._transformed_output = standard_artifacts.TransformGraph()
self._transformed_output.uri = os.path.join(output_data_dir,
'transformed_graph')
transformed1 = standard_artifacts.Examples()
transformed1.uri = os.path.join(output_data_dir, 'transformed_examples',
'0')
transformed2 = standard_artifacts.Examples()
transformed2.uri = os.path.join(output_data_dir, 'transformed_examples',
'1')
self._transformed_example_artifacts = [transformed1, transformed2]
temp_path_output = _TempPath()
temp_path_output.uri = tempfile.mkdtemp()
self._updated_analyzer_cache_artifact = standard_artifacts.TransformCache()
self._updated_analyzer_cache_artifact.uri = os.path.join(
self._output_data_dir, 'CACHE')
self._pre_transform_schema = standard_artifacts.Schema()
self._pre_transform_schema.uri = os.path.join(output_data_dir,
'pre_transform_schema', '0')
self._pre_transform_stats = standard_artifacts.ExampleStatistics()
self._pre_transform_stats.uri = os.path.join(output_data_dir,
'pre_transform_stats', '0')
self._post_transform_schema = standard_artifacts.Schema()
self._post_transform_schema.uri = os.path.join(output_data_dir,
'post_transform_schema', '0')
self._post_transform_stats = standard_artifacts.ExampleStatistics()
self._post_transform_stats.uri = os.path.join(output_data_dir,
'post_transform_stats', '0')
self._post_transform_anomalies = standard_artifacts.ExampleAnomalies()
self._post_transform_anomalies.uri = os.path.join(
output_data_dir, 'post_transform_anomalies', '0')
self._output_dict = {
standard_component_specs.TRANSFORM_GRAPH_KEY: [
self._transformed_output
],
standard_component_specs.TRANSFORMED_EXAMPLES_KEY:
self._transformed_example_artifacts[:1],
executor.TEMP_PATH_KEY: [temp_path_output],
standard_component_specs.UPDATED_ANALYZER_CACHE_KEY: [
self._updated_analyzer_cache_artifact
],
standard_component_specs.PRE_TRANSFORM_STATS_KEY: [
self._pre_transform_stats
],
standard_component_specs.PRE_TRANSFORM_SCHEMA_KEY: [
self._pre_transform_schema
],
standard_component_specs.POST_TRANSFORM_ANOMALIES_KEY: [
self._post_transform_anomalies
],
standard_component_specs.POST_TRANSFORM_STATS_KEY: [
self._post_transform_stats
],
standard_component_specs.POST_TRANSFORM_SCHEMA_KEY: [
self._post_transform_schema
],
}
# Create exec properties skeleton.
self._exec_properties = {}
def setUp(self):
super().setUp()
self._output_data_dir = self._get_output_data_dir()
self._make_base_do_params(self._SOURCE_DATA_DIR, self._output_data_dir)
# Create exec properties skeleton.
self._module_file = os.path.join(self._SOURCE_DATA_DIR,
'module_file/transform_module.py')
self._preprocessing_fn = name_utils.get_full_name(
transform_module.preprocessing_fn)
self._stats_options_updater_fn = name_utils.get_full_name(
transform_module.stats_options_updater_fn)
self._exec_properties[standard_component_specs.SPLITS_CONFIG_KEY] = None
self._exec_properties[
standard_component_specs.FORCE_TF_COMPAT_V1_KEY] = int(
self._use_force_tf_compat_v1())
# Executor for test.
self._transform_executor = executor.Executor()
def _verify_transform_outputs(self,
materialize=True,
store_cache=True,
multiple_example_inputs=False,
disable_statistics=False):
expected_outputs = ['transformed_graph']
if store_cache:
expected_outputs.append('CACHE')
self.assertNotEqual(
0, len(fileio.listdir(self._updated_analyzer_cache_artifact.uri)))
example_artifacts = self._example_artifacts[:1]
transformed_example_artifacts = self._transformed_example_artifacts[:1]
if multiple_example_inputs:
example_artifacts = self._example_artifacts
transformed_example_artifacts = self._transformed_example_artifacts
if materialize:
expected_outputs.append('transformed_examples')
assert len(example_artifacts) == len(transformed_example_artifacts)
for example, transformed_example in zip(example_artifacts,
transformed_example_artifacts):
examples_train_files = fileio.glob(
os.path.join(example.uri, 'Split-train', '*'))
transformed_train_files = fileio.glob(
os.path.join(transformed_example.uri, 'Split-train', '*'))
self.assertGreater(len(transformed_train_files), 0)
examples_eval_files = fileio.glob(
os.path.join(example.uri, 'Split-eval', '*'))
transformed_eval_files = fileio.glob(
os.path.join(transformed_example.uri, 'Split-eval', '*'))
self.assertGreater(len(transformed_eval_files), 0)
# Construct datasets and count number of records in each split.
examples_train_count = _get_dataset_size(examples_train_files)
transformed_train_count = _get_dataset_size(transformed_train_files)
examples_eval_count = _get_dataset_size(examples_eval_files)
transformed_eval_count = _get_dataset_size(transformed_eval_files)
# Check for each split that it contains the same number of records in
# the input artifact as in the output artifact (i.e. the 1-to-1 mapping is
# preserved).
self.assertEqual(examples_train_count, transformed_train_count)
self.assertEqual(examples_eval_count, transformed_eval_count)
self.assertGreater(transformed_train_count, transformed_eval_count)
if disable_statistics:
self.assertFalse(
fileio.exists(
os.path.join(self._pre_transform_schema.uri, 'schema.pbtxt')))
self.assertFalse(
fileio.exists(
os.path.join(self._post_transform_schema.uri, 'schema.pbtxt')))
self.assertFalse(
fileio.exists(
os.path.join(self._pre_transform_stats.uri, 'FeatureStats.pb')))
self.assertFalse(
fileio.exists(
os.path.join(self._post_transform_stats.uri, 'FeatureStats.pb')))
self.assertFalse(
fileio.exists(
os.path.join(self._post_transform_anomalies.uri,
'SchemaDiff.pb')))
else:
expected_outputs.extend([
'pre_transform_schema', 'pre_transform_stats',
'post_transform_schema', 'post_transform_stats',
'post_transform_anomalies'
])
self.assertTrue(
fileio.exists(
os.path.join(self._pre_transform_schema.uri, 'schema.pbtxt')))
self.assertTrue(
fileio.exists(
os.path.join(self._post_transform_schema.uri, 'schema.pbtxt')))
self.assertTrue(
fileio.exists(
os.path.join(self._pre_transform_stats.uri, 'FeatureStats.pb')))
self.assertTrue(
fileio.exists(
os.path.join(self._post_transform_stats.uri, 'FeatureStats.pb')))
self.assertTrue(
fileio.exists(
os.path.join(self._post_transform_anomalies.uri,
'SchemaDiff.pb')))
# Depending on `materialize` and `store_cache`, check that
# expected outputs are exactly correct. If either flag is False, its
# respective output should not be present.
self.assertCountEqual(expected_outputs,
fileio.listdir(self._output_data_dir))
path_to_saved_model = os.path.join(self._transformed_output.uri,
tft.TFTransformOutput.TRANSFORM_FN_DIR,
tf.saved_model.SAVED_MODEL_FILENAME_PB)
self.assertTrue(fileio.exists(path_to_saved_model))
def _run_pipeline_get_metrics(self):
pipelines = []
def _create_pipeline_wrapper(*_):
result = self._makeTestPipeline()
pipelines.append(result)
return result
with tft_unit.mock.patch.object(
executor.Executor,
'_make_beam_pipeline',
autospec=True,
side_effect=_create_pipeline_wrapper):
transform_executor = executor.Executor()
transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
assert len(pipelines) == 1
return pipelines[0].metrics
def test_do_with_module_file(self):
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
self.assertIsNotNone(
self._transform_executor._GetStatsOptionsUpdaterFn(
self._exec_properties))
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_preprocessing_fn(self):
self._exec_properties[
standard_component_specs.PREPROCESSING_FN_KEY] = self._preprocessing_fn
self.assertIsNone(
self._transform_executor._GetStatsOptionsUpdaterFn(
self._exec_properties))
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_preprocessing_fn_and_stats_updater_fn(self):
self._exec_properties[
standard_component_specs.PREPROCESSING_FN_KEY] = self._preprocessing_fn
self._exec_properties[
standard_component_specs.STATS_OPTIONS_UPDATER_FN_KEY] = (
self._stats_options_updater_fn)
self.assertIsNotNone(
self._transform_executor._GetStatsOptionsUpdaterFn(
self._exec_properties))
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_materialization_disabled(self):
self._exec_properties[
standard_component_specs.PREPROCESSING_FN_KEY] = self._preprocessing_fn
del self._output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY]
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs(materialize=False)
def test_do_with_cache_materialization_disabled(self):
self._exec_properties[
standard_component_specs.PREPROCESSING_FN_KEY] = self._preprocessing_fn
del self._output_dict[standard_component_specs.UPDATED_ANALYZER_CACHE_KEY]
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs(store_cache=False)
def test_do_with_statistics_disabled(self):
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
self._exec_properties[
standard_component_specs.DISABLE_STATISTICS_KEY] = True
for key in [
standard_component_specs.PRE_TRANSFORM_STATS_KEY,
standard_component_specs.PRE_TRANSFORM_SCHEMA_KEY,
standard_component_specs.POST_TRANSFORM_ANOMALIES_KEY,
standard_component_specs.POST_TRANSFORM_STATS_KEY,
standard_component_specs.POST_TRANSFORM_SCHEMA_KEY
]:
self._output_dict.pop(key)
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs(disable_statistics=True)
def test_do_with_preprocessing_fn_custom_config(self):
self._exec_properties[
standard_component_specs.PREPROCESSING_FN_KEY] = (
name_utils.get_full_name(transform_module.preprocessing_fn))
self._exec_properties[
standard_component_specs.CUSTOM_CONFIG_KEY] = json.dumps({
'VOCAB_SIZE': 1000,
'OOV_SIZE': 10
})
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_preprocessing_fn_and_none_custom_config(self):
self._exec_properties[
standard_component_specs.PREPROCESSING_FN_KEY] = (
name_utils.get_full_name(transform_module.preprocessing_fn))
self._exec_properties['custom_config'] = json.dumps(None)
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_no_preprocessing_fn(self):
with self.assertRaises(ValueError):
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
def test_do_with_duplicate_preprocessing_fn(self):
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
self._exec_properties[
standard_component_specs.PREPROCESSING_FN_KEY] = self._preprocessing_fn
with self.assertRaises(ValueError):
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
def test_do_with_multiple_artifacts(self):
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
self._input_dict[
standard_component_specs.EXAMPLES_KEY] = self._example_artifacts
self._output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY] = (
self._transformed_example_artifacts)
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs(multiple_example_inputs=True)
def test_do_with_multiple_artifacts_single_output_artifact(self):
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
self._input_dict[
standard_component_specs.EXAMPLES_KEY] = self._example_artifacts
transformed = standard_artifacts.Examples()
transformed.uri = os.path.join(self._output_data_dir,
'transformed_examples')
self._output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY] = ([
transformed
])
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs(multiple_example_inputs=True)
def test_do_with_custom_splits(self):
self._exec_properties[
standard_component_specs.SPLITS_CONFIG_KEY] = proto_utils.proto_to_json(
transform_pb2.SplitsConfig(
analyze=['train'], transform=['train', 'eval']))
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_empty_analyze_splits(self):
self._exec_properties[
standard_component_specs.SPLITS_CONFIG_KEY] = proto_utils.proto_to_json(
transform_pb2.SplitsConfig(analyze=[], transform=['train', 'eval']))
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
with self.assertRaises(ValueError):
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
def test_do_with_empty_transform_splits(self):
self._exec_properties['splits_config'] = proto_utils.proto_to_json(
transform_pb2.SplitsConfig(analyze=['train'], transform=[]))
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
self._output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY] = (
self._transformed_example_artifacts[:1])
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self.assertFalse(
fileio.exists(
os.path.join(self._transformed_example_artifacts[0].uri, 'train')))
self.assertFalse(
fileio.exists(
os.path.join(self._transformed_example_artifacts[0].uri, 'eval')))
path_to_saved_model = os.path.join(self._transformed_output.uri,
tft.TFTransformOutput.TRANSFORM_FN_DIR,
tf.saved_model.SAVED_MODEL_FILENAME_PB)
self.assertTrue(fileio.exists(path_to_saved_model))
def test_counters(self):
self._exec_properties[
standard_component_specs.PREPROCESSING_FN_KEY] = self._preprocessing_fn
self._exec_properties[
standard_component_specs.DISABLE_STATISTICS_KEY] = True
metrics = self._run_pipeline_get_metrics()
# The test data has 9909 instances in the train dataset, and 5091 instances
# in the eval dataset (obtained by running:
# gqui third_party/py/tfx/components/testdata/csv_example_gen/Split-train/ \
# data* \ 'select count(*)'
# )
# Since the analysis dataset (train) is read twice (once for analysis and
# once for transform), the expected value of the num_instances counter is:
# 9909 * 2 + 5091 = 24909.
self.assertMetricsCounterEqual(metrics, 'num_instances', 24909,
['tfx.Transform'])
# Since disable_statistics is True, TFDV should see 0 instances.
self.assertMetricsCounterEqual(metrics, 'num_instances', 0,
['tfx.DataValidation'])
# We expect 2 saved_models to be created because this is a 1 phase analysis
# preprocessing_fn.
self.assertMetricsCounterEqual(metrics, 'saved_models_created', 2)
# This should be the size of the preprocessing_fn's inputs dictionary which
# is 18 according to the schema.
self.assertMetricsCounterEqual(metrics, 'total_columns_count', 18)
# There are 9 features that are passed into tft analyzers in the
# preprocessing_fn.
self.assertMetricsCounterEqual(metrics, 'analyze_columns_count', 9)
# In addition, 7 features go through a pure TF map, not including the label,
# so we expect 9 + 7 + 1 = 17 transform columns.
self.assertMetricsCounterEqual(metrics, 'transform_columns_count', 17)
# There should be 1 path used for analysis since that's what input_dict
# specifies.
self.assertMetricsCounterEqual(metrics, 'analyze_paths_count', 1)
# Analysis cache optimization is enabled (cache writing).
self.assertMetricsCounterEqual(metrics, 'analyzer_cache_enabled', 1)
# StatsGen is disabled for the test.
self.assertMetricsCounterEqual(metrics, 'disable_statistics', 1)
# Output materialization is enabled.
self.assertMetricsCounterEqual(metrics, 'materialize', 1)
# Estimated stage count is 90 because there are 9 analyzers in the
# preprocessing_fn and a single span input.
metric = metrics.query(beam.metrics.MetricsFilter().with_name(
'estimated_stage_count_with_cache'))['distributions']
self.assertLen(metric, 1)
self.assertEqual(metric[0].committed.sum, 90)
@parameterized.named_parameters([('no_1st_input_cache', False),
('empty_1st_input_cache', True)])
def test_do_with_cache(self, provide_first_input_cache):
# First run that creates cache.
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
if provide_first_input_cache:
self._input_dict[standard_component_specs.ANALYZER_CACHE_KEY] = []
metrics = self._run_pipeline_get_metrics()
# The test data has 9909 instances in the train dataset, and 5091 instances
# in the eval dataset. Since the analysis dataset (train) is read twice when
# no input cache is present (once for analysis and once for transform), the
# expected value of the num_instances counter is: 9909 * 2 + 5091 = 24909.
self.assertMetricsCounterEqual(metrics, 'num_instances', 24909,
['tfx.Transform'])
# Additionally we have 24909 instances due to generating statistics.
self.assertMetricsCounterEqual(metrics, 'num_instances', 24909,
['tfx.DataValidation'])
self._verify_transform_outputs(store_cache=True)
# Second run from cache.
self._output_data_dir = self._get_output_data_dir('2nd_run')
analyzer_cache_artifact = standard_artifacts.TransformCache()
analyzer_cache_artifact.uri = self._updated_analyzer_cache_artifact.uri
self._make_base_do_params(self._SOURCE_DATA_DIR, self._output_data_dir)
self._input_dict[standard_component_specs.ANALYZER_CACHE_KEY] = [
analyzer_cache_artifact
]
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
metrics = self._run_pipeline_get_metrics()
# Since input cache should now cover all analysis (train) paths, the train
# and eval sets are each read exactly once for transform. Thus, the
# expected value of the num_instances counter is: 9909 + 5091 = 15000.
self.assertMetricsCounterEqual(metrics, 'num_instances', 15000,
['tfx.Transform'])
# Additionally we have 24909 instances due to generating statistics.
self.assertMetricsCounterEqual(metrics, 'num_instances', 24909,
['tfx.DataValidation'])
self._verify_transform_outputs(store_cache=True)
@tft_unit.mock.patch.object(executor, '_MAX_ESTIMATED_STAGES_COUNT', 21)
def test_do_with_cache_disabled_too_many_stages(self):
self._exec_properties[
standard_component_specs.MODULE_FILE_KEY] = self._module_file
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs(store_cache=False)
self.assertFalse(fileio.exists(self._updated_analyzer_cache_artifact.uri))
if __name__ == '__main__':
tf.test.main()
|
|
#!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Helper script for OSS-Fuzz users. Can do common tasks like building
projects/fuzzers, running them etc."""
from __future__ import print_function
from multiprocessing.dummy import Pool as ThreadPool
import argparse
import datetime
import errno
import logging
import os
import pipes
import re
import subprocess
import sys
import templates
import constants
OSS_FUZZ_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
BUILD_DIR = os.path.join(OSS_FUZZ_DIR, 'build')
BASE_RUNNER_IMAGE = 'gcr.io/oss-fuzz-base/base-runner'
BASE_IMAGES = {
'generic': [
'gcr.io/oss-fuzz-base/base-image',
'gcr.io/oss-fuzz-base/base-clang',
'gcr.io/oss-fuzz-base/base-builder',
BASE_RUNNER_IMAGE,
'gcr.io/oss-fuzz-base/base-runner-debug',
],
'go': ['gcr.io/oss-fuzz-base/base-builder-go'],
'jvm': ['gcr.io/oss-fuzz-base/base-builder-jvm'],
'python': ['gcr.io/oss-fuzz-base/base-builder-python'],
'rust': ['gcr.io/oss-fuzz-base/base-builder-rust'],
'swift': ['gcr.io/oss-fuzz-base/base-builder-swift'],
}
VALID_PROJECT_NAME_REGEX = re.compile(r'^[a-zA-Z0-9_-]+$')
MAX_PROJECT_NAME_LENGTH = 26
CORPUS_URL_FORMAT = (
'gs://{project_name}-corpus.clusterfuzz-external.appspot.com/libFuzzer/'
'{fuzz_target}/')
CORPUS_BACKUP_URL_FORMAT = (
'gs://{project_name}-backup.clusterfuzz-external.appspot.com/corpus/'
'libFuzzer/{fuzz_target}/')
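# Illustrative sketch of the formatted URLs (hypothetical project/target names):
#   CORPUS_URL_FORMAT.format(project_name='myproject', fuzz_target='myproject_parser')
#   -> 'gs://myproject-corpus.clusterfuzz-external.appspot.com/libFuzzer/myproject_parser/'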
LANGUAGE_REGEX = re.compile(r'[^\s]+')
PROJECT_LANGUAGE_REGEX = re.compile(r'\s*language\s*:\s*([^\s]+)')
WORKDIR_REGEX = re.compile(r'\s*WORKDIR\s*([^\s]+)')
LANGUAGES_WITH_BUILDER_IMAGES = {'go', 'jvm', 'python', 'rust', 'swift'}
if sys.version_info[0] >= 3:
raw_input = input # pylint: disable=invalid-name
# pylint: disable=too-many-lines
class Project:
"""Class representing a project that is in OSS-Fuzz or an external project
(ClusterFuzzLite user)."""
def __init__(
self,
project_name_or_path,
is_external=False,
build_integration_path=constants.DEFAULT_EXTERNAL_BUILD_INTEGRATION_PATH):
self.is_external = is_external
if self.is_external:
self.path = os.path.abspath(project_name_or_path)
self.name = os.path.basename(self.path)
self.build_integration_path = os.path.join(self.path,
build_integration_path)
else:
self.name = project_name_or_path
self.path = os.path.join(OSS_FUZZ_DIR, 'projects', self.name)
self.build_integration_path = self.path
@property
def dockerfile_path(self):
"""Returns path to the project Dockerfile."""
return os.path.join(self.build_integration_path, 'Dockerfile')
@property
def language(self):
"""Returns project language."""
if self.is_external:
# TODO(metzman): Handle this properly.
return constants.DEFAULT_LANGUAGE
project_yaml_path = os.path.join(self.path, 'project.yaml')
with open(project_yaml_path) as file_handle:
content = file_handle.read()
for line in content.splitlines():
match = PROJECT_LANGUAGE_REGEX.match(line)
if match:
return match.group(1)
logging.warning('Language not specified in project.yaml.')
return None
@property
def out(self):
"""Returns the out dir for the project. Creates it if needed."""
return _get_out_dir(self.name)
@property
def work(self):
"""Returns the work dir for the project. Creates it if needed."""
return _get_project_build_subdir(self.name, 'work')
@property
def corpus(self):
"""Returns the corpus dir for the project. Creates it if needed."""
return _get_project_build_subdir(self.name, 'corpus')
def main(): # pylint: disable=too-many-branches,too-many-return-statements
"""Gets the subcommand from the program arguments and runs it. Returns 0 on
success, 1 on error."""
logging.basicConfig(level=logging.INFO)
parser = get_parser()
args = parse_args(parser)
# Need to do this before chdir.
# TODO(https://github.com/google/oss-fuzz/issues/6758): Get rid of chdir.
if hasattr(args, 'testcase_path'):
args.testcase_path = _get_absolute_path(args.testcase_path)
# Note: this has to happen after parse_args above as parse_args needs to know
# the original CWD for external projects.
os.chdir(OSS_FUZZ_DIR)
if not os.path.exists(BUILD_DIR):
os.mkdir(BUILD_DIR)
# We have different default values for `sanitizer` depending on the `engine`.
# Some commands do not have `sanitizer` argument, so `hasattr` is necessary.
if hasattr(args, 'sanitizer') and not args.sanitizer:
if args.engine == 'dataflow':
args.sanitizer = 'dataflow'
else:
args.sanitizer = constants.DEFAULT_SANITIZER
if args.command == 'generate':
result = generate(args)
elif args.command == 'build_image':
result = build_image(args)
elif args.command == 'build_fuzzers':
result = build_fuzzers(args)
elif args.command == 'check_build':
result = check_build(args)
elif args.command == 'download_corpora':
result = download_corpora(args)
elif args.command == 'run_fuzzer':
result = run_fuzzer(args)
elif args.command == 'coverage':
result = coverage(args)
elif args.command == 'reproduce':
result = reproduce(args)
elif args.command == 'shell':
result = shell(args)
elif args.command == 'pull_images':
result = pull_images()
else:
# Print help string if no arguments provided.
parser.print_help()
result = False
return bool_to_retcode(result)
def bool_to_retcode(boolean):
"""Returns 0 if |boolean| is truthy, since 0 is the standard return code for
a successful process execution. Returns 1 otherwise, indicating that the
process failed."""
return 0 if boolean else 1
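# Quick illustration of the mapping used by the command handlers in main():
#   bool_to_retcode(True) -> 0 (success), bool_to_retcode(False) -> 1 (failure)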
def parse_args(parser, args=None):
"""Parses |args| using |parser| and returns parsed args. Also changes
|args.build_integration_path| to have correct default behavior."""
# Use default argument None for args so that in production, argparse does its
# normal behavior, but unit testing is easier.
parsed_args = parser.parse_args(args)
project = getattr(parsed_args, 'project', None)
if not project:
return parsed_args
# Use hacky method for extracting attributes so that ShellTest works.
# TODO(metzman): Fix this.
is_external = getattr(parsed_args, 'external', False)
parsed_args.project = Project(parsed_args.project, is_external)
return parsed_args
def _add_external_project_args(parser):
parser.add_argument(
'--external',
help='Is project external?',
default=False,
action='store_true',
)
def get_parser(): # pylint: disable=too-many-statements
"""Returns an argparse parser."""
parser = argparse.ArgumentParser('helper.py', description='oss-fuzz helpers')
subparsers = parser.add_subparsers(dest='command')
generate_parser = subparsers.add_parser(
'generate', help='Generate files for new project.')
generate_parser.add_argument('project')
generate_parser.add_argument(
'--language',
default=constants.DEFAULT_LANGUAGE,
choices=['c', 'c++', 'rust', 'go', 'jvm', 'swift', 'python'],
help='Project language.')
_add_external_project_args(generate_parser)
build_image_parser = subparsers.add_parser('build_image',
help='Build an image.')
build_image_parser.add_argument('project')
build_image_parser.add_argument('--pull',
action='store_true',
help='Pull latest base image.')
build_image_parser.add_argument('--cache',
action='store_true',
default=False,
help='Use docker cache when building image.')
build_image_parser.add_argument('--no-pull',
action='store_true',
help='Do not pull latest base image.')
_add_external_project_args(build_image_parser)
build_fuzzers_parser = subparsers.add_parser(
'build_fuzzers', help='Build fuzzers for a project.')
_add_architecture_args(build_fuzzers_parser)
_add_engine_args(build_fuzzers_parser)
_add_sanitizer_args(build_fuzzers_parser)
_add_environment_args(build_fuzzers_parser)
_add_external_project_args(build_fuzzers_parser)
build_fuzzers_parser.add_argument('project')
build_fuzzers_parser.add_argument('source_path',
help='path of local source',
nargs='?')
build_fuzzers_parser.add_argument('--mount_path',
dest='mount_path',
help='path to mount local source in '
'(defaults to WORKDIR)')
build_fuzzers_parser.add_argument('--clean',
dest='clean',
action='store_true',
help='clean existing artifacts.')
build_fuzzers_parser.add_argument('--no-clean',
dest='clean',
action='store_false',
help='do not clean existing artifacts '
'(default).')
build_fuzzers_parser.set_defaults(clean=False)
check_build_parser = subparsers.add_parser(
'check_build', help='Checks that fuzzers execute without errors.')
_add_architecture_args(check_build_parser)
_add_engine_args(check_build_parser, choices=constants.ENGINES)
_add_sanitizer_args(check_build_parser, choices=constants.SANITIZERS)
_add_environment_args(check_build_parser)
check_build_parser.add_argument('project',
help='name of the project or path (external)')
check_build_parser.add_argument('fuzzer_name',
help='name of the fuzzer',
nargs='?')
_add_external_project_args(check_build_parser)
run_fuzzer_parser = subparsers.add_parser(
'run_fuzzer', help='Run a fuzzer in the emulated fuzzing environment.')
_add_engine_args(run_fuzzer_parser)
_add_sanitizer_args(run_fuzzer_parser)
_add_environment_args(run_fuzzer_parser)
_add_external_project_args(run_fuzzer_parser)
run_fuzzer_parser.add_argument(
'--corpus-dir', help='directory to store corpus for the fuzz target')
run_fuzzer_parser.add_argument('project',
help='name of the project or path (external)')
run_fuzzer_parser.add_argument('fuzzer_name', help='name of the fuzzer')
run_fuzzer_parser.add_argument('fuzzer_args',
help='arguments to pass to the fuzzer',
nargs='*')
coverage_parser = subparsers.add_parser(
'coverage', help='Generate code coverage report for the project.')
coverage_parser.add_argument('--no-corpus-download',
action='store_true',
help='do not download corpus backup from '
'OSS-Fuzz; use corpus located in '
'build/corpus/<project>/<fuzz_target>/')
coverage_parser.add_argument('--port',
default='8008',
help='specify port for'
' a local HTTP server rendering coverage report')
coverage_parser.add_argument('--fuzz-target',
help='specify name of a fuzz '
'target to be run for generating coverage '
'report')
coverage_parser.add_argument('--corpus-dir',
help='specify location of corpus'
' to be used (requires --fuzz-target argument)')
coverage_parser.add_argument('project',
help='name of the project or path (external)')
coverage_parser.add_argument('extra_args',
help='additional arguments to '
'pass to llvm-cov utility.',
nargs='*')
_add_external_project_args(coverage_parser)
download_corpora_parser = subparsers.add_parser(
'download_corpora', help='Download all corpora for a project.')
download_corpora_parser.add_argument('--fuzz-target',
help='specify name of a fuzz target')
download_corpora_parser.add_argument(
'project', help='name of the project or path (external)')
reproduce_parser = subparsers.add_parser('reproduce',
help='Reproduce a crash.')
reproduce_parser.add_argument('--valgrind',
action='store_true',
help='run with valgrind')
reproduce_parser.add_argument('project',
help='name of the project or path (external)')
reproduce_parser.add_argument('fuzzer_name', help='name of the fuzzer')
reproduce_parser.add_argument('testcase_path', help='path of local testcase')
reproduce_parser.add_argument('fuzzer_args',
help='arguments to pass to the fuzzer',
nargs='*')
_add_environment_args(reproduce_parser)
_add_external_project_args(reproduce_parser)
shell_parser = subparsers.add_parser(
'shell', help='Run /bin/bash within the builder container.')
shell_parser.add_argument('project',
help='name of the project or path (external)')
shell_parser.add_argument('source_path',
help='path of local source',
nargs='?')
_add_architecture_args(shell_parser)
_add_engine_args(shell_parser)
_add_sanitizer_args(shell_parser)
_add_environment_args(shell_parser)
_add_external_project_args(shell_parser)
subparsers.add_parser('pull_images', help='Pull base images.')
return parser
def is_base_image(image_name):
"""Checks if the image name is a base image."""
return os.path.exists(os.path.join('infra', 'base-images', image_name))
def check_project_exists(project):
"""Checks if a project exists."""
if os.path.exists(project.path):
return True
if project.is_external:
descriptive_project_name = project.path
else:
descriptive_project_name = project.name
logging.error('"%s" does not exist.', descriptive_project_name)
return False
def _check_fuzzer_exists(project, fuzzer_name):
"""Checks if a fuzzer exists."""
command = ['docker', 'run', '--rm']
command.extend(['-v', '%s:/out' % project.out])
command.append(BASE_RUNNER_IMAGE)
command.extend(['/bin/bash', '-c', 'test -f /out/%s' % fuzzer_name])
try:
subprocess.check_call(command)
except subprocess.CalledProcessError:
logging.error('%s does not seem to exist. Please run build_fuzzers first.',
fuzzer_name)
return False
return True
def _get_absolute_path(path):
"""Returns absolute path with user expansion."""
return os.path.abspath(os.path.expanduser(path))
def _get_command_string(command):
"""Returns a shell escaped command string."""
return ' '.join(pipes.quote(part) for part in command)
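# Illustrative sketch (hypothetical command): arguments containing spaces are
# quoted so the string can be copy-pasted into a shell:
#   _get_command_string(['docker', 'run', '--rm', 'image name'])
#   -> "docker run --rm 'image name'"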
def _get_project_build_subdir(project, subdir_name):
"""Creates the |subdir_name| subdirectory of the |project| subdirectory in
|BUILD_DIR| and returns its path."""
directory = os.path.join(BUILD_DIR, subdir_name, project)
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def _get_out_dir(project=''):
"""Creates and returns path to /out directory for the given project (if
specified)."""
return _get_project_build_subdir(project, 'out')
def _add_architecture_args(parser, choices=None):
"""Adds common architecture args."""
if choices is None:
choices = constants.ARCHITECTURES
parser.add_argument('--architecture',
default=constants.DEFAULT_ARCHITECTURE,
choices=choices)
def _add_engine_args(parser, choices=None):
"""Adds common engine args."""
if choices is None:
choices = constants.ENGINES
parser.add_argument('--engine',
default=constants.DEFAULT_ENGINE,
choices=choices)
def _add_sanitizer_args(parser, choices=None):
"""Adds common sanitizer args."""
if choices is None:
choices = constants.SANITIZERS
parser.add_argument(
'--sanitizer',
default=None,
choices=choices,
help='the default is "address"; "dataflow" for "dataflow" engine')
def _add_environment_args(parser):
"""Adds common environment args."""
parser.add_argument('-e',
action='append',
help="set environment variable e.g. VAR=value")
def build_image_impl(project, cache=True, pull=False):
"""Builds image."""
image_name = project.name
if is_base_image(image_name):
image_project = 'oss-fuzz-base'
docker_build_dir = os.path.join(OSS_FUZZ_DIR, 'infra', 'base-images',
image_name)
dockerfile_path = os.path.join(docker_build_dir, 'Dockerfile')
else:
if not check_project_exists(project):
return False
dockerfile_path = project.dockerfile_path
docker_build_dir = project.path
image_project = 'oss-fuzz'
if pull and not pull_images(project.language):
return False
build_args = []
if not cache:
build_args.append('--no-cache')
build_args += [
'-t',
'gcr.io/%s/%s' % (image_project, image_name), '--file', dockerfile_path
]
build_args.append(docker_build_dir)
return docker_build(build_args)
def _env_to_docker_args(env_list):
"""Turns an environment variable list into docker arguments."""
return sum([['-e', v] for v in env_list], [])
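# Illustrative sketch of the expansion:
#   _env_to_docker_args(['SANITIZER=address', 'FUZZING_ENGINE=libfuzzer'])
#   -> ['-e', 'SANITIZER=address', '-e', 'FUZZING_ENGINE=libfuzzer']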
def workdir_from_lines(lines, default='/src'):
"""Gets the WORKDIR from the given lines."""
for line in reversed(lines): # reversed to get last WORKDIR.
match = re.match(WORKDIR_REGEX, line)
if match:
workdir = match.group(1)
workdir = workdir.replace('$SRC', '/src')
if not os.path.isabs(workdir):
workdir = os.path.join('/src', workdir)
return os.path.normpath(workdir)
return default
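# Illustrative sketch (hypothetical Dockerfile lines; the last WORKDIR wins,
# '$SRC' maps to '/src', and relative paths are rooted at '/src'):
#   workdir_from_lines(['WORKDIR $SRC/first', 'WORKDIR myproject'])
#   -> '/src/myproject'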
def _workdir_from_dockerfile(project):
"""Parses WORKDIR from the Dockerfile for the given project."""
with open(project.dockerfile_path) as file_handle:
lines = file_handle.readlines()
return workdir_from_lines(lines, default=os.path.join('/src', project.name))
def docker_run(run_args, print_output=True):
"""Calls `docker run`."""
command = ['docker', 'run', '--rm', '--privileged']
# Support environments with a TTY.
if sys.stdin.isatty():
command.append('-i')
command.extend(run_args)
logging.info('Running: %s.', _get_command_string(command))
stdout = None
if not print_output:
stdout = open(os.devnull, 'w')
try:
subprocess.check_call(command, stdout=stdout, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return False
return True
def docker_build(build_args):
"""Calls `docker build`."""
command = ['docker', 'build']
command.extend(build_args)
logging.info('Running: %s.', _get_command_string(command))
try:
subprocess.check_call(command)
except subprocess.CalledProcessError:
logging.error('Docker build failed.')
return False
return True
def docker_pull(image):
"""Call `docker pull`."""
command = ['docker', 'pull', image]
logging.info('Running: %s', _get_command_string(command))
try:
subprocess.check_call(command)
except subprocess.CalledProcessError:
logging.error('Docker pull failed.')
return False
return True
def build_image(args):
"""Builds docker image."""
if args.pull and args.no_pull:
logging.error('Incompatible arguments --pull and --no-pull.')
return False
if args.pull:
pull = True
elif args.no_pull:
pull = False
else:
y_or_n = raw_input('Pull latest base images (compiler/runtime)? (y/N): ')
pull = y_or_n.lower() == 'y'
if pull:
logging.info('Pulling latest base images...')
else:
logging.info('Using cached base images...')
# When build_image is called explicitly, use the docker cache only if --cache was passed.
if build_image_impl(args.project, cache=args.cache, pull=pull):
return True
return False
def build_fuzzers_impl( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches
project,
clean,
engine,
sanitizer,
architecture,
env_to_add,
source_path,
mount_path=None):
"""Builds fuzzers."""
if not build_image_impl(project):
return False
if clean:
logging.info('Cleaning existing build artifacts.')
# Clean old and possibly conflicting artifacts in project's out directory.
docker_run([
'-v',
'%s:/out' % project.out, '-t',
'gcr.io/oss-fuzz/%s' % project.name, '/bin/bash', '-c', 'rm -rf /out/*'
])
docker_run([
'-v',
'%s:/work' % project.work, '-t',
'gcr.io/oss-fuzz/%s' % project.name, '/bin/bash', '-c', 'rm -rf /work/*'
])
else:
logging.info('Keeping existing build artifacts as-is (if any).')
env = [
'FUZZING_ENGINE=' + engine,
'SANITIZER=' + sanitizer,
'ARCHITECTURE=' + architecture,
'GIT_REPO=', # TODO(navidem): load from main_repo in project.yaml.
]
_add_oss_fuzz_ci_if_needed(env)
if project.language:
env.append('FUZZING_LANGUAGE=' + project.language)
if env_to_add:
env += env_to_add
command = _env_to_docker_args(env)
if source_path:
workdir = _workdir_from_dockerfile(project)
if mount_path:
command += [
'-v',
'%s:%s' % (_get_absolute_path(source_path), mount_path),
]
else:
if workdir == '/src':
logging.error('Cannot use local checkout with "WORKDIR: /src".')
return False
command += [
'-v',
'%s:%s' % (_get_absolute_path(source_path), workdir),
]
command += [
'-v',
'%s:/out' % project.out, '-v',
'%s:/work' % project.work, '-t',
'gcr.io/oss-fuzz/%s' % project.name
]
result = docker_run(command)
if not result:
logging.error('Building fuzzers failed.')
return False
return True
def build_fuzzers(args):
"""Builds fuzzers."""
return build_fuzzers_impl(args.project,
args.clean,
args.engine,
args.sanitizer,
args.architecture,
args.e,
args.source_path,
mount_path=args.mount_path)
def _add_oss_fuzz_ci_if_needed(env):
"""Adds value of |OSS_FUZZ_CI| environment variable to |env| if it is set."""
oss_fuzz_ci = os.getenv('OSS_FUZZ_CI')
if oss_fuzz_ci:
env.append('OSS_FUZZ_CI=' + oss_fuzz_ci)
def check_build(args):
"""Checks that fuzzers in the container execute without errors."""
if not check_project_exists(args.project):
return False
if (args.fuzzer_name and
not _check_fuzzer_exists(args.project, args.fuzzer_name)):
return False
fuzzing_language = args.project.language
if not fuzzing_language:
fuzzing_language = constants.DEFAULT_LANGUAGE
logging.warning('Language not specified in project.yaml. Defaulting to %s.',
fuzzing_language)
env = [
'FUZZING_ENGINE=' + args.engine,
'SANITIZER=' + args.sanitizer,
'ARCHITECTURE=' + args.architecture,
'FUZZING_LANGUAGE=' + fuzzing_language,
]
_add_oss_fuzz_ci_if_needed(env)
if args.e:
env += args.e
run_args = _env_to_docker_args(env) + [
'-v', '%s:/out' % args.project.out, '-t', BASE_RUNNER_IMAGE
]
if args.fuzzer_name:
run_args += ['test_one.py', args.fuzzer_name]
else:
run_args.append('test_all.py')
result = docker_run(run_args)
if result:
logging.info('Check build passed.')
else:
logging.error('Check build failed.')
return result
def _get_fuzz_targets(project):
"""Returns names of fuzz targets built in the project's /out directory."""
fuzz_targets = []
for name in os.listdir(project.out):
if name.startswith('afl-'):
continue
if name.startswith('jazzer_'):
continue
if name == 'llvm-symbolizer':
continue
path = os.path.join(project.out, name)
# Python and JVM fuzz targets are only executable for the root user, so
# we can't use os.access.
if os.path.isfile(path) and (os.stat(path).st_mode & 0o111):
fuzz_targets.append(name)
return fuzz_targets
def _get_latest_corpus(project, fuzz_target, base_corpus_dir):
"""Downloads the latest corpus for the given fuzz target."""
corpus_dir = os.path.join(base_corpus_dir, fuzz_target)
if not os.path.exists(corpus_dir):
os.makedirs(corpus_dir)
if not fuzz_target.startswith(project.name + '_'):
fuzz_target = '%s_%s' % (project.name, fuzz_target)
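# e.g. (hypothetical names) a fuzz target named 'parser' in project 'myproject'
# is looked up in the corpus bucket as 'myproject_parser'.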
corpus_backup_url = CORPUS_BACKUP_URL_FORMAT.format(project_name=project.name,
fuzz_target=fuzz_target)
command = ['gsutil', 'ls', corpus_backup_url]
# Don't capture stderr. We want it to print in real time, in case gsutil is
# asking for two-factor authentication.
corpus_listing = subprocess.Popen(command, stdout=subprocess.PIPE)
output, _ = corpus_listing.communicate()
# Some fuzz targets (e.g. new ones) may not have a corpus yet; just skip those.
if corpus_listing.returncode:
logging.warning('Corpus for %s not found:\n', fuzz_target)
return
if output:
latest_backup_url = output.splitlines()[-1]
archive_path = corpus_dir + '.zip'
command = ['gsutil', '-q', 'cp', latest_backup_url, archive_path]
subprocess.check_call(command)
command = ['unzip', '-q', '-o', archive_path, '-d', corpus_dir]
subprocess.check_call(command)
os.remove(archive_path)
else:
# Sync the working corpus copy if a minimized backup is not available.
corpus_url = CORPUS_URL_FORMAT.format(project_name=project.name,
fuzz_target=fuzz_target)
command = ['gsutil', '-m', '-q', 'rsync', '-R', corpus_url, corpus_dir]
subprocess.check_call(command)
def download_corpora(args):
"""Downloads most recent corpora from GCS for the given project."""
if not check_project_exists(args.project):
return False
try:
with open(os.devnull, 'w') as stdout:
subprocess.check_call(['gsutil', '--version'], stdout=stdout)
except OSError:
logging.error('gsutil not found. Please install it from '
'https://cloud.google.com/storage/docs/gsutil_install')
return False
if args.fuzz_target:
fuzz_targets = [args.fuzz_target]
else:
fuzz_targets = _get_fuzz_targets(args.project)
corpus_dir = args.project.corpus
def _download_for_single_target(fuzz_target):
try:
_get_latest_corpus(args.project, fuzz_target, corpus_dir)
return True
except Exception as error: # pylint:disable=broad-except
logging.error('Corpus download for %s failed: %s.', fuzz_target,
str(error))
return False
logging.info('Downloading corpora for %s project to %s.', args.project.name,
corpus_dir)
thread_pool = ThreadPool()
return all(thread_pool.map(_download_for_single_target, fuzz_targets))
def coverage(args):
"""Generates code coverage using clang source based code coverage."""
if args.corpus_dir and not args.fuzz_target:
logging.error(
'--corpus-dir requires specifying a particular fuzz target using '
'--fuzz-target')
return False
if not check_project_exists(args.project):
return False
if args.project.language not in constants.LANGUAGES_WITH_COVERAGE_SUPPORT:
logging.error(
'Project is written in %s, coverage for it is not supported yet.',
args.project.language)
return False
if (not args.no_corpus_download and not args.corpus_dir and
not args.project.is_external):
if not download_corpora(args):
return False
env = [
'FUZZING_ENGINE=libfuzzer',
'FUZZING_LANGUAGE=%s' % args.project.language,
'PROJECT=%s' % args.project.name,
'SANITIZER=coverage',
'HTTP_PORT=%s' % args.port,
'COVERAGE_EXTRA_ARGS=%s' % ' '.join(args.extra_args),
]
run_args = _env_to_docker_args(env)
if args.port:
run_args.extend([
'-p',
'%s:%s' % (args.port, args.port),
])
if args.corpus_dir:
if not os.path.exists(args.corpus_dir):
logging.error('The path provided in --corpus-dir argument does not '
'exist.')
return False
corpus_dir = os.path.realpath(args.corpus_dir)
run_args.extend(['-v', '%s:/corpus/%s' % (corpus_dir, args.fuzz_target)])
else:
run_args.extend(['-v', '%s:/corpus' % args.project.corpus])
run_args.extend([
'-v',
'%s:/out' % args.project.out,
'-t',
BASE_RUNNER_IMAGE,
])
run_args.append('coverage')
if args.fuzz_target:
run_args.append(args.fuzz_target)
result = docker_run(run_args)
if result:
logging.info('Successfully generated clang code coverage report.')
else:
logging.error('Failed to generate clang code coverage report.')
return result
def run_fuzzer(args):
"""Runs a fuzzer in the container."""
if not check_project_exists(args.project):
return False
if not _check_fuzzer_exists(args.project, args.fuzzer_name):
return False
env = [
'FUZZING_ENGINE=' + args.engine,
'SANITIZER=' + args.sanitizer,
'RUN_FUZZER_MODE=interactive',
]
if args.e:
env += args.e
run_args = _env_to_docker_args(env)
if args.corpus_dir:
if not os.path.exists(args.corpus_dir):
logging.error('The path provided in --corpus-dir argument does not exist')
return False
corpus_dir = os.path.realpath(args.corpus_dir)
run_args.extend([
'-v',
'{corpus_dir}:/tmp/{fuzzer}_corpus'.format(corpus_dir=corpus_dir,
fuzzer=args.fuzzer_name)
])
run_args.extend([
'-v',
'%s:/out' % args.project.out,
'-t',
BASE_RUNNER_IMAGE,
'run_fuzzer',
args.fuzzer_name,
] + args.fuzzer_args)
return docker_run(run_args)
def reproduce(args):
"""Reproduces a specific test case from a specific project."""
return reproduce_impl(args.project, args.fuzzer_name, args.valgrind, args.e,
args.fuzzer_args, args.testcase_path)
def reproduce_impl( # pylint: disable=too-many-arguments
project,
fuzzer_name,
valgrind,
env_to_add,
fuzzer_args,
testcase_path,
run_function=docker_run,
err_result=False):
"""Reproduces a testcase in the container."""
if not check_project_exists(project):
return err_result
if not _check_fuzzer_exists(project, fuzzer_name):
return err_result
debugger = ''
env = []
image_name = 'base-runner'
if valgrind:
debugger = 'valgrind --tool=memcheck --track-origins=yes --leak-check=full'
if debugger:
image_name = 'base-runner-debug'
env += ['DEBUGGER=' + debugger]
if env_to_add:
env += env_to_add
run_args = _env_to_docker_args(env) + [
'-v',
'%s:/out' % project.out,
'-v',
'%s:/testcase' % _get_absolute_path(testcase_path),
'-t',
'gcr.io/oss-fuzz-base/%s' % image_name,
'reproduce',
fuzzer_name,
'-runs=100',
] + fuzzer_args
return run_function(run_args)
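# Minimal usage sketch for reproduce_impl (project and fuzzer names are
# hypothetical):
#
#   reproduce_impl(project, 'my_fuzzer', valgrind=False, env_to_add=None,
#                  fuzzer_args=[], testcase_path='/tmp/crash-abc')
#
# With valgrind=True the base-runner-debug image is used and DEBUGGER is set,
# so the testcase is replayed under memcheck instead of plain libFuzzer.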
def _validate_project_name(project_name):
"""Validates |project_name| is a valid OSS-Fuzz project name."""
if len(project_name) > MAX_PROJECT_NAME_LENGTH:
logging.error(
'Project name needs to be less than or equal to %d characters.',
MAX_PROJECT_NAME_LENGTH)
return False
if not VALID_PROJECT_NAME_REGEX.match(project_name):
logging.info('Invalid project name: %s.', project_name)
return False
return True
def _validate_language(language):
if not LANGUAGE_REGEX.match(language):
logging.error('Invalid project language %s.', language)
return False
return True
def _create_build_integration_directory(directory):
"""Returns True on successful creation of a build integration directory.
Suitable for OSS-Fuzz and external projects."""
try:
os.makedirs(directory)
except OSError as error:
if error.errno != errno.EEXIST:
raise
logging.error('%s already exists.', directory)
return False
return True
def _template_project_file(filename, template, template_args, directory):
"""Templates |template| using |template_args| and writes the result to
|directory|/|filename|. Sets the file to executable if |filename| is
build.sh."""
file_path = os.path.join(directory, filename)
with open(file_path, 'w') as file_handle:
file_handle.write(template % template_args)
if filename == 'build.sh':
os.chmod(file_path, 0o755)
def generate(args):
"""Generates empty project files."""
return _generate_impl(args.project, args.language)
def _get_current_datetime():
"""Returns this year. Needed for mocking."""
return datetime.datetime.now()
def _base_builder_from_language(language):
"""Returns the base builder for the specified language."""
if language not in LANGUAGES_WITH_BUILDER_IMAGES:
return 'base-builder'
return 'base-builder-{language}'.format(language=language)
def _generate_impl(project, language):
"""Implementation of generate(). Useful for testing."""
if project.is_external:
# External project.
project_templates = templates.EXTERNAL_TEMPLATES
else:
# Internal project.
if not _validate_project_name(project.name):
return False
project_templates = templates.TEMPLATES
if not _validate_language(language):
return False
directory = project.build_integration_path
if not _create_build_integration_directory(directory):
return False
logging.info('Writing new files to: %s.', directory)
template_args = {
'project_name': project.name,
'base_builder': _base_builder_from_language(language),
'language': language,
'year': _get_current_datetime().year
}
for filename, template in project_templates.items():
_template_project_file(filename, template, template_args, directory)
return True
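# For illustration, a project named 'libfoo' written in c++ would be templated
# with roughly the following values (inferred from the code above and assuming
# 'c++' has no dedicated builder image; not taken from real output):
#
#   template_args = {'project_name': 'libfoo',
#                    'base_builder': 'base-builder',
#                    'language': 'c++',
#                    'year': 2021}
#
# Each file in project_templates is then written into the build integration
# directory, with build.sh marked executable.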
def shell(args):
"""Runs a shell within a docker image."""
if not build_image_impl(args.project):
return False
env = [
'FUZZING_ENGINE=' + args.engine,
'SANITIZER=' + args.sanitizer,
'ARCHITECTURE=' + args.architecture,
]
if args.project.name != 'base-runner-debug':
env.append('FUZZING_LANGUAGE=' + args.project.language)
if args.e:
env += args.e
if is_base_image(args.project.name):
image_project = 'oss-fuzz-base'
out_dir = _get_out_dir()
else:
image_project = 'oss-fuzz'
out_dir = args.project.out
run_args = _env_to_docker_args(env)
if args.source_path:
run_args.extend([
'-v',
'%s:%s' % (_get_absolute_path(args.source_path), '/src'),
])
run_args.extend([
'-v',
'%s:/out' % out_dir, '-v',
'%s:/work' % args.project.work, '-t',
'gcr.io/%s/%s' % (image_project, args.project.name), '/bin/bash'
])
docker_run(run_args)
return True
def pull_images(language=None):
"""Pulls base images used to build projects in language lang (or all if lang
is None)."""
for base_image_lang, base_images in BASE_IMAGES.items():
if (language is None or base_image_lang == 'generic' or
base_image_lang == language):
for base_image in base_images:
if not docker_pull(base_image):
return False
return True
if __name__ == '__main__':
sys.exit(main())
|
|
#
# The contents of this file are subject to the Mozilla Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is State Machine Compiler (SMC).
#
# The Initial Developer of the Original Code is Charles W. Rapp.
# Portions created by Charles W. Rapp are
# Copyright (C) 2000 - 2003 Charles W. Rapp.
# All Rights Reserved.
#
# Contributor(s):
# Port to Python by Francois Perrad, [email protected]
#
# Vehicle --
#
# Draws a generic vehicle on the map (a black square) which
# moves in straight lines along the road and obeys the stop light.
#
# RCS ID
# $Id: Vehicle.py,v 1.2 2009/04/19 14:39:48 cwrapp Exp $
#
# CHANGE LOG
# $Log: Vehicle.py,v $
# Revision 1.2 2009/04/19 14:39:48 cwrapp
# Added call to enterStartState before issuing first FSM transition.
#
# Revision 1.1 2005/05/28 17:48:30 cwrapp
# Added Python examples 1 - 4 and 7.
#
#
from Tkinter import *
import Vehicle_sm
class Vehicle:
_speed = 2
def __init__(self, stoplight, direction, canvas):
self._fsm = Vehicle_sm.Vehicle_sm(self)
# The canvas to draw on and the direction this vehicle is
# moving.
self._canvas = canvas
self._direction = direction
# The stoplight object is responsible for knowing the road
# layout. Ask it for all relevant information.
self._stoplight = stoplight
# This vehicle is initially at the road's outside edge.
# Figure out the road's length.
XLength = stoplight.getRoadLengthX()
YLength = stoplight.getRoadLengthY()
LaneWidth = stoplight.getRoadWidth() / 2
# The vehicle is 6 pixels x 6 pixels.
self._vehicleSize = 6
# A 3 pixel separation is to be maintained between vehicles.
self._vehicleSeparation = 3
# How far away the vehicle is from the curb.
CurbOffset = (LaneWidth - self._vehicleSize) / 2
# The vehicle's current canvas location. This is the
# square's upper left hand corner.
if direction == 'north':
self._xpos = (XLength / 2) + CurbOffset
self._ypos = YLength - self._vehicleSize
elif direction == 'south':
self._xpos = (XLength / 2) - LaneWidth + CurbOffset
self._ypos = 0
elif direction == 'east':
self._xpos = 0
self._ypos = (YLength / 2) + CurbOffset
elif direction == 'west':
self._xpos = XLength - self._vehicleSize
self._ypos = (YLength / 2) - LaneWidth + CurbOffset
# Put the vehicle on display.
self._canvasID = canvas.create_rectangle(
self._xpos,
self._ypos,
self._xpos + self._vehicleSize,
self._ypos + self._vehicleSize,
fill='black',
outline='white',
)
# Move this vehicle along at near movie-refresh rate.
self._redrawRate = 1000 / 60
# Store the after's timer ID here.
self._timerID = -1
# Set this flag to true when the vehicle has
# completed its trip.
self._isDoneFlag = False
# Uncomment to see debug output.
#self._fsm.setDebugFlag(True)
def Delete(self):
if self._timerID >= 0:
self._canvas.after_cancel(self._timerID)
self._timerID = -1
self._canvas.delete(self._canvasID)
# timeout --
#
# If the vehicle has driven off the canvas, then
# delete the vehicle.
# Check if the vehicle is at the intersection and the
# light is either yellow or red. If yes, then issue a
# "LightRed" transition. If all is go, then keep on
# truckin.
#
# Arguments:
# None.
def timeout(self):
self._timerID = -1
if self.OffCanvas():
self._fsm.TripDone()
elif self.AtIntersection() and self.getLight() != 'green':
self._fsm.LightRed()
else:
self._fsm.KeepGoing()
def getLight(self):
return self._stoplight.getLight(self._direction)
# lightGreen --
#
# The light has turned green. Time to get moving again.
#
# Arguments:
# None
def lightGreen(self):
self._fsm.LightGreen()
# setSpeed --
#
# Set speed for all vehicles.
#
# Arguments:
# speed In pixels.
def setSpeed(klass, speed):
if speed < 1 or speed > 10:
print "Invalid speed (%d).\n" % speed
else:
klass._speed = speed
setSpeed = classmethod(setSpeed)
# isDone --
#
# Has this vehicle completed its trip?
#
# Arguments:
# None.
#
# Results:
# Returns true if the trip is done and false
# otherwise.
def isDone(self):
return self._isDoneFlag
# start --
#
# Start this vehicle running.
#
# Arguments:
# None.
def Start(self):
self._fsm.enterStartState()
self._fsm.Start()
# pause --
#
# Pause this vehicle's running.
#
# Arguments:
# None.
def Pause(self):
self._fsm.Pause()
# continue --
#
# Continue this vehicle's running.
#
# Arguments:
# None.
def Continue(self):
self._fsm.Continue()
# stop --
#
# Stop this vehicle's running.
#
# Arguments:
# None.
#
def Stop(self):
self._fsm.Stop()
self.Delete()
# State Machine Actions
#
# The following methods are called by the state machine.
# SetTimer --
#
# Set the timer for the next move.
#
# Arguments:
# None.
def SetTimer(self):
self._timerID = self._canvas.after(self._redrawRate, self.timeout)
# StopTimer --
#
# Stop the vehicle's timer.
#
# Arguments:
# None.
def StopTimer(self):
if self._timerID >= 0:
self._canvas.after_cancel(self._timerID)
self._timerID = -1
# Move --
#
# 1. Calculate the vehicle's new position.
# 2. Remove the vehicle from the canvas.
# 3. Draw the vehicle's new position.
#
# Arguments:
# None.
#
# Results:
# None returned. Side effect of redrawing the vehicle.
def Move(self):
if self._direction == 'north':
Xmove = 0
Ymove = - self._speed
elif self._direction == 'south':
Xmove = 0
Ymove = self._speed
elif self._direction == 'east':
Xmove = self._speed
Ymove = 0
elif self._direction == 'west':
Xmove = - self._speed
Ymove = 0
self._canvas.move(self._canvasID, Xmove, Ymove)
self._xpos += Xmove
self._ypos += Ymove
# RegisterWithLight --
#
# When the light turns green, it will inform us.
#
# Arguments:
# None.
def RegisterWithLight(self):
self._stoplight.registerVehicle(self, self._direction)
# SelfDestruct --
#
# Remove the vehicle from the canvas.
#
# Arguments:
# None.
def SelfDestruct(self):
self._canvas.delete(self._canvasID)
self._canvasID = -1
self._isDoneFlag = True
# OffCanvas --
#
# Figure out if the vehicle has driven off the map.
#
# Arguments:
# None.
#
# Results:
# Returns true if the vehicle is off the map; otherwise
# false.
def OffCanvas(self):
if self._direction == 'north':
return (self._ypos - self._speed) <= 0
elif self._direction == 'south':
YLength = self._stoplight.getRoadLengthY()
return (self._ypos + self._speed) >= YLength
elif self._direction == 'east':
XLength = self._stoplight.getRoadLengthX()
return (self._xpos + self._speed) >= XLength
elif self._direction == 'west':
return (self._xpos - self._speed) <= 0
# AtIntersection --
#
# Figure out whether this vehicle is at the intersection
# or not.
#
# Arguments:
# None.
#
# Results:
# Returns true if the vehicle is at the intersection;
# otherwise, false.
def AtIntersection(self):
# The vehicle is not at the intersection until proven
# otherwise.
Retval = False
XLength = self._stoplight.getRoadLengthX()
YLength = self._stoplight.getRoadLengthY()
LaneWidth = self._stoplight.getRoadWidth() / 2
# Calculate the intersection's coordinates based on
# the vehicle's direction. Then calculate where the
# vehicle will end up this move. If the vehicle will
# move beyond the intersection stop line, then the
# vehicle is at the intersection.
#
# Also take into account the vehicles already waiting
# at the intersection.
#
# By the way, once the vehicle moves past the intersection,
# ignore the light.
NumVehicles = self._stoplight.getQueueSize(self._direction)
LenVehicles = (self._vehicleSize + self._vehicleSeparation) * NumVehicles
if self._direction == 'north':
YIntersection = (YLength / 2) + LaneWidth + (self._vehicleSize / 2) + LenVehicles
Retval = (self._ypos > YIntersection) and (self._ypos - self._speed <= YIntersection)
elif self._direction == 'south':
YIntersection = (YLength / 2) - LaneWidth - (self._vehicleSize / 2) - LenVehicles
Retval = (self._ypos < YIntersection) and (self._ypos + self._speed >= YIntersection)
elif self._direction == 'east':
XIntersection = (XLength / 2) - LaneWidth - (self._vehicleSize / 2) - LenVehicles
Retval = (self._xpos < XIntersection) and (self._xpos + self._speed >= XIntersection)
elif self._direction == 'west':
XIntersection = (XLength / 2) + LaneWidth + (self._vehicleSize / 2) + LenVehicles
Retval = (self._xpos > XIntersection) and (self._xpos - self._speed <= XIntersection)
return Retval
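# Worked example (hypothetical numbers) for the northbound case above:
# with getRoadLengthY() == 300, getRoadWidth() == 30 (so LaneWidth == 15),
# _vehicleSize == 6 and no queued vehicles,
#   YIntersection = 150 + 15 + 3 + 0 = 168.
# A vehicle at _ypos == 170 moving at speed 2 gives
#   (170 > 168) and (170 - 2 <= 168)  ->  True,
# i.e. this move would reach the stop line, so AtIntersection() returns True.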
|
|
# Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import math
import time
from xml.dom import minidom
from lxml import etree
import six
import webob
from nova.api.openstack import xmlutil
from nova import exception
from nova.openstack.common import gettextutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova import wsgi
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
LOG = logging.getLogger(__name__)
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
'application/xml',
'application/vnd.openstack.compute+xml',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
'application/vnd.openstack.compute+xml': 'xml',
'application/xml': 'xml',
'application/atom+xml': 'atom',
}
# These are typically automatically created by routes as either defaults
# collection or member methods.
_ROUTES_METHODS = [
'create',
'delete',
'show',
'update',
]
_METHODS_WITH_BODY = [
'POST',
'PUT',
]
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._extension_data = {'db_items': {}}
def cache_db_items(self, key, items, item_key='id'):
"""Allow API methods to store objects from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
db_items = self._extension_data['db_items'].setdefault(key, {})
for item in items:
db_items[item[item_key]] = item
def get_db_items(self, key):
"""Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
"""
return self._extension_data['db_items'][key]
def get_db_item(self, key, item_key):
"""Allow an API extension to get a previously stored object
within the same API request.
Note that the object data will be slightly stale.
"""
return self.get_db_items(key).get(item_key)
def cache_db_instances(self, instances):
self.cache_db_items('instances', instances, 'uuid')
def cache_db_instance(self, instance):
self.cache_db_items('instances', [instance], 'uuid')
def get_db_instances(self):
return self.get_db_items('instances')
def get_db_instance(self, instance_uuid):
return self.get_db_item('instances', instance_uuid)
def cache_db_flavors(self, flavors):
self.cache_db_items('flavors', flavors, 'flavorid')
def cache_db_flavor(self, flavor):
self.cache_db_items('flavors', [flavor], 'flavorid')
def get_db_flavors(self):
return self.get_db_items('flavors')
def get_db_flavor(self, flavorid):
return self.get_db_item('flavors', flavorid)
def cache_db_compute_nodes(self, compute_nodes):
self.cache_db_items('compute_nodes', compute_nodes, 'id')
def cache_db_compute_node(self, compute_node):
self.cache_db_items('compute_nodes', [compute_node], 'id')
def get_db_compute_nodes(self):
return self.get_db_items('compute_nodes')
def get_db_compute_node(self, id):
return self.get_db_item('compute_nodes', id)
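# Illustrative use of the per-request cache above (hypothetical data):
#
#   req.cache_db_flavors([{'flavorid': 'm1.small', 'ram': 2048}])
#   req.get_db_flavor('m1.small')  # -> {'flavorid': 'm1.small', 'ram': 2048}
#
# The cache only lives for the lifetime of this request, so extensions that
# read it see possibly stale data but avoid a second DB query.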
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in SUPPORTED_CONTENT_TYPES:
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection; it only checks the header.
"""
if "Content-Type" not in self.headers:
return None
content_type = self.content_type
# NOTE(markmc): text/plain is the default for eventlet and
# other webservers which use mimetools.Message.gettype()
# whereas twisted defaults to ''.
if not content_type or content_type == 'text/plain':
return None
if content_type not in SUPPORTED_CONTENT_TYPES:
raise exception.InvalidContentType(content_type=content_type)
return content_type
def best_match_language(self):
"""Determine the best available language for the request.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
return self.accept_language.best_match(
gettextutils.get_available_languages('nova'))
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization."""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
""":param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
node = xmlutil.safe_minidom_parse_string(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
if not attr.startswith("xmlns"):
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result
def find_first_child_named_in_namespace(self, parent, namespace, name):
"""Search a nodes children for the first child with a given name."""
for node in parent.childNodes:
if (node.localName == name and
node.namespaceURI and
node.namespaceURI == namespace):
return node
return None
def find_first_child_named(self, parent, name):
"""Search a nodes children for the first child with a given name."""
for node in parent.childNodes:
if node.localName == name:
return node
return None
def find_children_named(self, parent, name):
"""Return all of a nodes children who have the given name."""
for node in parent.childNodes:
if node.localName == name:
yield node
def extract_text(self, node):
"""Get the text field contained by the given node."""
ret_val = ""
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE:
ret_val += child.nodeValue
return ret_val
def extract_elements(self, node):
"""Get only Element type childs from node."""
elements = []
for child in node.childNodes:
if child.nodeType == child.ELEMENT_NODE:
elements.append(child)
return elements
def find_attribute_or_element(self, parent, name):
"""Get an attribute value; fallback to an element if not found."""
if parent.hasAttribute(name):
return parent.getAttribute(name)
node = self.find_first_child_named(parent, name)
if node:
return self.extract_text(node)
return None
def default(self, datastring):
return {'body': self._from_xml(datastring)}
class MetadataXMLDeserializer(XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request."""
metadata = {}
if metadata_node is not None:
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
class DictSerializer(ActionDispatcher):
"""Default request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization."""
def default(self, data):
return jsonutils.dumps(data)
class XMLDictSerializer(DictSerializer):
def __init__(self, metadata=None, xmlns=None):
""":param metadata: information needed to deserialize xml into
a dictionary.
:param xmlns: XML namespace to include with serialized xml
"""
super(XMLDictSerializer, self).__init__()
self.metadata = metadata or {}
self.xmlns = xmlns
def default(self, data):
# We expect data to contain a single key which is the XML root.
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
return self.to_xml_string(node)
def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toxml('UTF-8')
#NOTE (ameade): the has_atom flag should be removed after all of the
# xml serializers and view builders have been updated to the current
# spec, which requires all responses to include xmlns:atom; the has_atom
# flag only exists to keep the current tests from breaking
def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
if has_atom:
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)
#TODO(bcwaldon): accomplish this without a type-check
if isinstance(data, list):
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
#TODO(bcwaldon): accomplish this without a type-check
elif isinstance(data, dict):
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
if k == "deleted":
v = str(bool(v))
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
if not isinstance(data, six.string_types):
data = six.text_type(data)
node = doc.createTextNode(data)
result.appendChild(node)
return result
def _create_link_nodes(self, xml_doc, links):
link_nodes = []
for link in links:
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
if 'type' in link:
link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
def _to_xml(self, root):
"""Convert the xml object to an xml string."""
return etree.tostring(root, encoding='UTF-8', xml_declaration=True)
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
def deserializers(**deserializers):
"""Attaches deserializers to a method.
This decorator associates a dictionary of deserializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_deserializers'):
func.wsgi_deserializers = {}
func.wsgi_deserializers.update(deserializers)
return func
return decorator
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
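# Sketch of how a controller method might combine these decorators
# (the method and serializer classes are hypothetical; the media-type keys
# follow _MEDIA_TYPE_MAP above):
#
#   @response(202)
#   @serializers(xml=MyXMLSerializer)
#   @deserializers(xml=MyXMLDeserializer)
#   def create(self, req, body):
#       ...
#
# Resource later reads func.wsgi_code, func.wsgi_serializers and
# func.wsgi_deserializers when dispatching the request.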
class ResponseObject(object):
"""Bundles a response object with appropriate serializers.
Object that app methods may return in order to bind alternate
serializers with a response object to be serialized. Its use is
optional.
"""
def __init__(self, obj, code=None, headers=None, **serializers):
"""Binds serializers with an object.
Takes keyword arguments akin to the @serializer() decorator
for specifying serializers. Serializers specified will be
given preference over default serializers or method-specific
serializers on return.
"""
self.obj = obj
self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = None
self.media_type = None
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def _bind_method_serializers(self, meth_serializers):
"""Binds method serializers with the response object.
Binds the method serializers with the response object.
Serializers specified to the constructor will take precedence
over serializers specified to this method.
:param meth_serializers: A dictionary with keys mapping to
response types and values containing
serializer objects.
"""
# We can't use update because that would be the wrong
# precedence
for mtype, serializer in meth_serializers.items():
self.serializers.setdefault(mtype, serializer)
def get_serializer(self, content_type, default_serializers=None):
"""Returns the serializer for the wrapped object.
Returns the serializer for the wrapped object subject to the
indicated content type. If no serializer matching the content
type is attached, an appropriate serializer drawn from the
default serializers will be used. If no appropriate
serializer is available, raises InvalidContentType.
"""
default_serializers = default_serializers or {}
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in self.serializers:
return mtype, self.serializers[mtype]
else:
return mtype, default_serializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def preserialize(self, content_type, default_serializers=None):
"""Prepares the serializer that will be used to serialize.
Determines the serializer that will be used and prepares an
instance of it for later call. This allows the serializer to
be accessed by extensions for, e.g., template extension.
"""
mtype, serializer = self.get_serializer(content_type,
default_serializers)
self.media_type = mtype
self.serializer = serializer()
def attach(self, **kwargs):
"""Attach slave templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
def serialize(self, request, content_type, default_serializers=None):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
if self.serializer:
serializer = self.serializer
else:
_mtype, _serializer = self.get_serializer(content_type,
default_serializers)
serializer = _serializer()
response = webob.Response()
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = utils.utf8(str(value))
response.headers['Content-Type'] = utils.utf8(content_type)
if self.obj is not None:
response.body = serializer.serialize(self.obj)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek_json(body):
"""Determine action to invoke."""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action and the decoded body...
return decoded.keys()[0]
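# For example (illustrative body), action_peek_json('{"resize": {"flavorRef": "2"}}')
# returns 'resize'; a body with zero or more than one top-level key raises
# MalformedRequestBody.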
def action_peek_xml(body):
"""Determine action to invoke."""
dom = xmlutil.safe_minidom_parse_string(body)
action_node = dom.childNodes[0]
return action_node.tagName
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.NotAuthorized):
raise Fault(webob.exc.HTTPForbidden(
explanation=ex_value.format_message()))
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code,
explanation=ex_value.format_message()))
# Under python 2.6, TypeError's exception value is actually a string,
# so test here via ex_type instead:
# http://bugs.python.org/issue7853
elif issubclass(ex_type, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_('Exception handling resource: %s') % ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
def __init__(self, controller, action_peek=None, inherits=None,
**deserializers):
""":param controller: object that implement methods created by routes
lib
:param action_peek: dictionary of routines for peeking into an
action request body to determine the
desired action
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
to this resource.
"""
self.controller = controller
default_deserializers = dict(xml=XMLDeserializer,
json=JSONDeserializer)
default_deserializers.update(deserializers)
self.default_deserializers = default_deserializers
self.default_serializers = dict(xml=XMLDictSerializer,
json=JSONDictSerializer)
self.action_peek = dict(xml=action_peek_xml,
json=action_peek_json)
self.action_peek.update(action_peek or {})
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
self.inherits = inherits
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
return None, ''
return content_type, request.body
def deserialize(self, meth, content_type, body):
meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in meth_deserializers:
deserializer = meth_deserializers[mtype]
else:
deserializer = self.default_deserializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
if (hasattr(deserializer, 'want_controller')
and deserializer.want_controller):
return deserializer(self.controller).deserialize(body)
else:
return deserializer().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = gen.next()
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# Run post-processing in the reverse order
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
def _should_have_body(self, request):
return request.method in _METHODS_WITH_BODY
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
if body:
msg = _("Action: '%(action)s', body: "
"%(body)s") % {'action': action,
'body': unicode(body, 'utf-8')}
LOG.debug(logging.mask_password(msg))
LOG.debug(_("Calling method '%(meth)s' (Content-type='%(ctype)s', "
"Accept='%(accept)s')"),
{'meth': str(meth),
'ctype': content_type,
'accept': accept})
# Now, deserialize the request body...
try:
contents = {}
if self._should_have_body(request):
#allow empty body with PUT and POST
if request.content_length == 0:
contents = {'body': None}
else:
contents = self.deserialize(meth, content_type, body)
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
" doesn't match Context's project_id"
" '%(context_project_id)s'") % \
{'project_id': project_id,
'context_project_id': context.project_id}
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
if hasattr(response, 'headers'):
if context:
response.headers.add('x-compute-request-id',
context.request_id)
for hdr, val in response.headers.items():
# Headers must be utf-8 strings
response.headers[hdr] = utils.utf8(str(val))
return response
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in _ROUTES_METHODS + ['action']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
# OK, it's an action; figure out which action...
mtype = _MEDIA_TYPE_MAP.get(content_type)
action_name = self.action_peek[mtype](body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
return method(req=request, **action_args)
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
# start with wsgi actions from base classes
for base in bases:
actions.update(getattr(base, 'wsgi_actions', {}))
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
@six.add_metaclass(ControllerMetaclass)
class Controller(object):
"""Default controller."""
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
@staticmethod
def is_valid_body(body, entity_name):
if not (body and entity_name in body):
return False
def is_dict(d):
try:
d.get(None)
return True
except AttributeError:
return False
if not is_dict(body[entity_name]):
return False
return True
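# Sketch of how the metaclass wires up actions (hypothetical controller):
#
#   class ServerActionsController(Controller):
#       @action('resize')
#       def _resize(self, req, id, body):
#           ...
#
# ControllerMetaclass then produces
#   ServerActionsController.wsgi_actions == {'resize': '_resize'}
# which Resource.register_actions() uses to dispatch 'action' requests.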
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
429: "overLimit",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
for key, value in self.wrapped_exc.headers.items():
self.wrapped_exc.headers[key] = str(value)
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
user_locale = req.best_match_language()
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
explanation = self.wrapped_exc.explanation
LOG.debug(_("Returning %(code)s to user: %(explanation)s"),
{'code': code, 'explanation': explanation})
explanation = gettextutils.translate(explanation,
user_locale)
fault_data = {
fault_name: {
'code': code,
'message': explanation}}
if code == 413 or code == 429:
retry = self.wrapped_exc.headers.get('Retry-After', None)
if retry:
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {fault_name: 'code'}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
content_type = req.best_match_content_type()
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
_set_request_id_header(req, self.wrapped_exc.headers)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
class RateLimitFault(webob.exc.HTTPException):
"""Rate-limited request response."""
def __init__(self, message, details, retry_time):
"""Initialize new `RateLimitFault` with relevant information."""
hdrs = RateLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPTooManyRequests(headers=hdrs)
self.content = {
"overLimit": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
"retryAfter": hdrs['Retry-After'],
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""Return the wrapped exception with a serialized body conforming
to our error format.
"""
user_locale = request.best_match_language()
content_type = request.best_match_content_type()
metadata = {"attributes": {"overLimit": ["code", "retryAfter"]}}
self.content['overLimit']['message'] = \
gettextutils.translate(
self.content['overLimit']['message'],
user_locale)
self.content['overLimit']['details'] = \
gettextutils.translate(
self.content['overLimit']['details'],
user_locale)
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
def _set_request_id_header(req, headers):
context = req.environ.get('nova.context')
if context:
headers['x-compute-request-id'] = context.request_id
|
|
from . import gng2
from pygraph.classes.graph import graph
from pygraph.algorithms.minmax import cut_tree
from pygraph.algorithms.accessibility import connected_components
from .utils import __dict_reverse as dict_reverse
import itertools
import time
from numpy import array,sum,sqrt
class data_block:
"""This is the programming and user interface for GNG. data is the training dataset, should be array or list, with each element numpy arrays with the same dimension.
no_label is True or False. While set to False, the last element of each array in data will be treated as labels.
The rest of the variables are training settings for GNG."""
nodes = [] #:Weight of neurons.
gr = {} #:Topology structures, implemeted with python-graph.
def __init__(self,data,no_label = True,age_max = 300,nn_lambda = 88,ann = 0.5,bnn = 0.0005,eb = 0.05,en = 0.0006):
gng2.set_parameter(age_max,nn_lambda,ann,bnn,eb,en)
un_label = 0
timecost = time.time()
t = 0
gr = graph()
if no_label:
for n_point in data:
t += 1
gng2.step(n_point,un_label,t)
else:
for n_point in data:
t += 1
n_data = list(n_point)
n_X = array(n_data[0:-1])
n_Y = n_data[-1]
gng2.step(n_X,n_Y,t)
gng2.step(array([]),0,-1)
print('time cost',time.time() - timecost)
self.nodes = gng2.setN
self.gr = gng2.gr
print(len(self.nodes))
def output_graph(self):
"""Return the topology structure as a python-graph."""
return self.gr
def output_nodes(self):
"""Return the list of neuron weights."""
return self.nodes
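# Minimal usage sketch (hypothetical training data):
#
#   data = [array([0.1, 0.2]), array([0.4, 0.3]), array([0.9, 0.8])]
#   block = data_block(data, no_label=True)
#   gr = block.output_graph()       # topology as a python-graph object
#   weights = block.output_nodes()  # list of neuron weight vectors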
def graph_features(self):
"""Generating topological features including vertice orders for future use."""
gr_nodes = self.gr.nodes()
gr_edges = self.gr.edges()
node_count = len(gr_nodes)
edge_count = len(gr_edges) / 2.0
average_order = 0.0
clustering_coefficient = 0.0
max_order = 0
for each_node in gr_nodes:
#for orders
current_node_order = self.gr.node_order(each_node)
average_order += current_node_order
max_order = max(max_order,current_node_order)
#now for clustering coefficient
direct_neighbors = self.gr.neighbors(each_node)
tmp_v_edge_count = 0.0
tmp_r_edge_count = 0.0
for virtual_edge in itertools.product(direct_neighbors,direct_neighbors):
if virtual_edge[0] != virtual_edge[1]:
tmp_v_edge_count += 1.0
if self.gr.has_edge(tuple(virtual_edge)):
tmp_r_edge_count += 1.0
if tmp_v_edge_count == 0:
clustering_coefficient += 0.0
else:
clustering_coefficient += (tmp_r_edge_count / tmp_v_edge_count)
clustering_coefficient /= float(node_count)
average_order /= float(node_count)
#for kernel order
cut_dict = cut_tree(self.gr)
cut_places = set(cut_dict.values())
how_many_kernel_orders = list(range(5))
kernel_orders = []
bloods = 0.0
for kernel_tick in how_many_kernel_orders:
if kernel_tick in cut_places:
bloods += 1.0
kernel_orders.append(bloods)
#for redundant edges and missing edges
redundant_edges = 0.0
missing_edges = 0.0
for each_edge in gr_edges:
node0 = each_edge[0]
node1 = each_edge[1]
#find common set of nodes' neighbors
common_set = set(self.gr.neighbors(node0)).intersection(set(self.gr.neighbors(node1)))
if len(common_set) == 0:
missing_edges += 1.0
elif len(common_set) > 1:
in_cell_edges = list(itertools.combinations(list(common_set),2))
cell_judge = True
for cell_edge in in_cell_edges:
if self.gr.has_edge(cell_edge):
cell_judge = False
if cell_judge == False:
redundant_edges += 1.0
if edge_count != 0.0:
redundant_edges /= float(edge_count)
missing_edges /= float(edge_count)
#average edge length
total_length = 0.0
for each_edge in gr_edges:
node0 = each_edge[0]
node1 = each_edge[1]
total_length += sqrt(sum((self.nodes[node0] - self.nodes[node1])**2))
if len(gr_edges) == 0:
average_length = 0.0
else:
average_length = total_length / float(len(gr_edges))
return [average_length,node_count,edge_count,average_order,max_order,redundant_edges,missing_edges] + kernel_orders
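# The returned feature vector is, in order: average edge length, node count,
# edge count, average vertex order, maximum vertex order, redundant-edge ratio,
# missing-edge ratio, followed by the five cumulative kernel-order values.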
def draw_2d(self, scale = 1, axis_ = False):
"""Draws the topology structure and neurons. scale is real number, it can be set arbitrarily to adjust the size
of drawed neuron clusters. axis is True or False, and means weither to enable axis in the final drawings.
In this method, MDS is used for drawing high dimensional Euclidean graphs. If you do not use this method, sklearn is
not a prerequisite for running the pygks software."""
groups = connected_components(self.gr)
if len(self.nodes[0]) != 2:
print('using MDS for non-2d drawing')
from sklearn import manifold
from sklearn.metrics import euclidean_distances
similarities = euclidean_distances(self.nodes)
for i in range(len(self.nodes)):
for j in range(len(self.nodes)):
if groups[i] == groups[j]:
similarities[i,j] *= scale
mds = manifold.MDS(n_components=2, max_iter=500, eps=1e-7,dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
draw_nodes = pos
else:
draw_nodes = self.nodes
print('now_drawing')
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
node_count = len(draw_nodes)
for i in range(node_count):
for j in range(i,node_count):
if self.gr.has_edge((i,j)):
ax.plot([draw_nodes[i][0], draw_nodes[j][0]],[draw_nodes[i][1], draw_nodes[j][1]], color='k', linestyle='-', linewidth=1)
group_counts = len(set(groups.values()))
style_tail = ['.','o','x','^','s','+']
style_head = ['b','r','g','k']
style_list = []
for each in itertools.product(style_head,style_tail):
style_list.append(each[0]+each[1])
i = 0
for each in draw_nodes:
ax.plot(each[0],each[1],style_list[groups[i]-1])
i += 1
if not axis_:
plt.axis('off')
plt.show()
def outlier_nn(self,positive = 1,negative = -1):
"""This method finds the largest neuron cluster. If a neuron belongs to this cluster, a label specified by positive will
be added to this neuron, else this neuron will be labeled by negative variable. The labeled results will be outputed in a
list as labels_final."""
groups = connected_components(self.gr)
#find the largest group
group_counts = dict_reverse(groups)
max_count = 0
for keys,values in list(group_counts.items()):
if len(values) > max_count:
max_count = len(values)
max_group = keys
affines = {}
for keys,values in list(groups.items()):
if values == max_group:
affines[values] = positive
else:
affines[values] = negative
#this is only for outlier detection
for values in list(groups.values()):
if values not in list(affines.keys()):
affines[values] = -1
for keys,values in list(groups.items()):
groups[keys] = affines[values]
labels_final = []
for i in range(len(self.nodes)):
labels_final.append(groups[i])
print(labels_final)
return self.nodes, labels_final
def counts(self):
"""Output the winning times of each neuron and the accumulated errors of the GNG network."""
return gng2.accumulated, gng2.accumulated_error
if __name__ == '__main__':
print('sb')
|
|
# -*- coding: utf-8 -*-
"""Choices for input_choice."""
#
# (C) Pywikibot team, 2015-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import pywikibot
class Option(object):
"""
A basic option for input_choice.
The following methods need to be implemented:
* format(default=None)
* result(value)
* test(value)
The methods C{test} and C{handled} are related as follows: when
C{handled} returns itself, C{test} must return True for that value. So
if C{test} returns False, C{handled} may not return itself, although it
may still return something other than None.
Also C{result} only returns a sensible value when C{test} returns True for
the same value.
"""
def __init__(self, stop=True):
"""Constructor."""
super(Option, self).__init__()
self._stop = stop
@staticmethod
def formatted(text, options, default=None):
"""Create a text with the options formatted into it."""
formatted_options = []
for option in options:
formatted_options.append(option.format(default=default))
return '{0} ({1})'.format(text, ', '.join(formatted_options))
@property
def stop(self):
"""Return whether this option stops asking."""
return self._stop
def handled(self, value):
"""
Return the Option object that applies to the given value.
If this Option object doesn't know which applies it returns None.
"""
if self.test(value):
return self
else:
return None
def format(self, default=None):
"""Return a formatted string for that option."""
raise NotImplementedError()
def result(self, value):
"""Return the actual value which is associated by the given one."""
raise NotImplementedError()
def test(self, value):
"""Return True whether this option applies."""
raise NotImplementedError()


class OutputOption(Option):

    """An option that never stops and can output on each question."""

    before_question = False

    @property
    def stop(self):
        """Never stop asking."""
        return False

    def result(self, value):
        """Just output the value."""
        self.output()

    def output(self):
        """Output a string when selected and possibly before the question."""
        raise NotImplementedError()


class StandardOption(Option):

    """An option with a description and shortcut and returning the shortcut."""

    def __init__(self, option, shortcut, stop=True):
        """Constructor."""
        super(StandardOption, self).__init__(stop)
        self.option = option
        self.shortcut = shortcut.lower()

    def format(self, default=None):
        """Return a formatted string for that option."""
        index = self.option.lower().find(self.shortcut)
        shortcut = self.shortcut
        if self.shortcut == default:
            shortcut = self.shortcut.upper()
        if index >= 0:
            return '{0}[{1}]{2}'.format(self.option[:index], shortcut,
                                        self.option[index + len(self.shortcut):])
        else:
            return '{0} [{1}]'.format(self.option, shortcut)

    def result(self, value):
        """Return the lowercased shortcut."""
        return self.shortcut

    def test(self, value):
        """Return True if this option applies."""
        return (self.shortcut.lower() == value.lower() or
                self.option.lower() == value.lower())


class OutputProxyOption(OutputOption, StandardOption):

    """An option which calls output of the given output class."""

    def __init__(self, option, shortcut, output):
        """Create a new option for the given sequence."""
        super(OutputProxyOption, self).__init__(option, shortcut)
        self._outputter = output

    def output(self):
        """Output the contents."""
        self._outputter.output()


class NestedOption(OutputOption, StandardOption):

    """
    An option containing other options.

    It returns True in test if this option itself applies and False if only
    a sub option applies, while handled returns the applying sub option.
    """

    def __init__(self, option, shortcut, description, options):
        """Constructor."""
        super(NestedOption, self).__init__(option, shortcut, False)
        self.description = description
        self.options = options

    def format(self, default=None):
        """Return a formatted string for that option."""
        self._output = Option.formatted(self.description, self.options)
        return super(NestedOption, self).format(default=default)

    def handled(self, value):
        """Return itself if it applies or the applying sub option."""
        for option in self.options:
            handled = option.handled(value)
            if handled is not None:
                return handled
        else:
            return super(NestedOption, self).handled(value)

    def output(self):
        """Output the suboptions."""
        pywikibot.output(self._output)


class ContextOption(OutputOption, StandardOption):

    """An option to show more and more context."""

    def __init__(self, option, shortcut, text, context, delta=100, start=0, end=0):
        """Constructor."""
        super(ContextOption, self).__init__(option, shortcut, False)
        self.text = text
        self.context = context
        self.delta = delta
        self.start = start
        self.end = end

    def result(self, value):
        """Add the delta to the context and output it."""
        self.context += self.delta
        super(ContextOption, self).result(value)

    def output(self):
        """Output the context."""
        start = max(0, self.start - self.context)
        end = min(len(self.text), self.end + self.context)
        self.output_range(start, end)

    def output_range(self, start_context, end_context):
        """Output a section from the text."""
        pywikibot.output(self.text[start_context:end_context])


class IntegerOption(Option):

    """An option allowing a range of integers."""

    def __init__(self, minimum=1, maximum=None, prefix=''):
        """Constructor."""
        super(IntegerOption, self).__init__()
        if not ((minimum is None or isinstance(minimum, int)) and
                (maximum is None or isinstance(maximum, int))):
            raise ValueError(
                'The minimum and maximum parameters must be int or None.')
        if minimum is not None and maximum is not None and minimum > maximum:
            raise ValueError('The minimum must be lower than the maximum.')
        self._min = minimum
        self._max = maximum
        self.prefix = prefix

    def test(self, value):
        """Return whether the value is an int and in the specified range."""
        try:
            value = self.parse(value)
        except ValueError:
            return False
        else:
            return ((self.minimum is None or value >= self.minimum) and
                    (self.maximum is None or value <= self.maximum))

    @property
    def minimum(self):
        """Return the lower bound of the range of allowed values."""
        return self._min

    @property
    def maximum(self):
        """Return the upper bound of the range of allowed values."""
        return self._max

    def format(self, default=None):
        """Return a formatted string showing the range."""
        if default is not None and self.test(default):
            value = self.parse(default)
            default = '[{0}]'.format(value)
        else:
            value = None
            default = ''
        if self.minimum is not None or self.maximum is not None:
            if default and value == self.minimum:
                minimum = default
                default = ''
            else:
                minimum = '' if self.minimum is None else str(self.minimum)
            if default and value == self.maximum:
                maximum = default
                default = ''
            else:
                maximum = '' if self.maximum is None else str(self.maximum)
            default = '-{0}-'.format(default) if default else '-'
            if self.minimum == self.maximum:
                rng = minimum
            else:
                rng = minimum + default + maximum
        else:
            rng = 'any' + default
        return '{0}<number> [{1}]'.format(self.prefix, rng)

    def parse(self, value):
        """Return integer from value with prefix removed."""
        if value.lower().startswith(self.prefix.lower()):
            return int(value[len(self.prefix):])
        else:
            raise ValueError('Value does not start with prefix')

    def result(self, value):
        """Return the value converted into int."""
        return (self.prefix, self.parse(value))


class ListOption(IntegerOption):

    """An option to select something from a list."""

    def __init__(self, sequence, prefix=''):
        """Constructor."""
        self._list = sequence
        try:
            super(ListOption, self).__init__(1, self.maximum, prefix)
        except ValueError:
            raise ValueError('The sequence is empty.')
        del self._max

    def format(self, default=None):
        """Return a string showing the range."""
        if not self._list:
            raise ValueError('The sequence is empty.')
        else:
            return super(ListOption, self).format(default=default)

    @property
    def maximum(self):
        """Return the maximum value."""
        return len(self._list)

    def result(self, value):
        """Return a tuple with the prefix and selected value."""
        return (self.prefix, self._list[self.parse(value) - 1])


class HighlightContextOption(ContextOption):

    """Show the original region highlighted."""

    def output_range(self, start, end):
        """Show normal context with a red center region."""
        pywikibot.output(self.text[start:self.start] + '\03{lightred}' +
                         self.text[self.start:self.end] + '\03{default}' +
                         self.text[self.end:end])


class ChoiceException(StandardOption, Exception):

    """A choice for input_choice which results in this exception."""

    def result(self, value):
        """Return itself to raise the exception."""
        return self


class QuitKeyboardInterrupt(ChoiceException, KeyboardInterrupt):

    """The user has cancelled processing at a prompt."""

    def __init__(self):
        """Constructor using the 'quit' ('q') option in input_choice."""
        super(QuitKeyboardInterrupt, self).__init__('quit', 'q')
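

# --- Illustrative sketch (not part of the original module) ------------------
# The classes above are normally handed to pywikibot's input_choice prompt,
# but their format/test/handled/result contract can be exercised directly.
# The guarded demo below only calls methods defined in this module; the
# concrete option labels, shortcuts, and range are illustrative assumptions.
if __name__ == '__main__':
    demo_options = [StandardOption('always', 'a'),
                    StandardOption('never', 'n'),
                    IntegerOption(minimum=1, maximum=5, prefix='r')]
    # Prints something like: Choose ([a]lways, [N]ever, r<number> [1-5])
    print(Option.formatted('Choose', demo_options, default='n'))
    for value in ('a', 'r3', 'x'):
        handled = next((o for o in demo_options if o.handled(value)), None)
        print(value, '->', handled.result(value) if handled else 'unhandled')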