file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
utils.py | newimagedb.close()
return True
#=================================================================================
# Eigentextures
#=================================================================================
class Eigentextures:
'''
This class implements principal component analysis (PCA), i.e. the
eigendecomposition of high-dimensional vectors.
It was designed with its use on whole images, parts of images, or
simply textures in mind.
If the images contain faces, the algorithm implemented here is
equivalent to the Eigenfaces algorithm presented by Matthew Turk and
Alex Pentland.
The variable names correspond to those used in the paper
"Eigenfaces for Recognition", Matthew Turk and Alex Pentland, 1991.
Key arguments:
trainingset - Array of rasterized textures. Each sample corresponds to
a row of the array.
evr - Explained variance ratio: indicates how much of the overall variance
is explained by the corresponding principal component or eigenvector.
numpc - This parameter is used to inform how many principal components the
function should consider.
'''
def __init__(self,trainingset,verbose=False):
self.__verbose=verbose
self.M=trainingset.shape[0]
self.N=trainingset.shape[1]
# STEP 1
# Gamma is the matrix whose columns are the rasterized pixels of each image of
# the training set
Gamma=np.transpose(trainingset)
# STEP 2
# Compute Psi, that is the average texture over the training set.
Psi=Gamma.mean(1)
self.Psi=Psi
Psi=(Psi.round()).astype(np.int32)
Psi=np.reshape(Psi,(Psi.shape[0],1))
# STEP 3
# Subtracts the average face from all samples, creating a zero mean
# distribution Phi.
self.__Phi=np.asarray(np.zeros(Gamma.shape),dtype=np.int32)
self.__Phi=Gamma-Psi
del Gamma
del trainingset
if self.__verbose==True: print "Eigentextures:\tPhi created successfully."
# STEP 4
# A minor product of the covariance matrix is calculated.
Phi_t=np.transpose(self.__Phi)
L=np.dot(Phi_t,self.__Phi)
del Phi_t
L=L/self.M
if self.__verbose==True: print "Eigentextures:\tMinor product generated successfully."
# STEP 5
# Calculates the eigenvalues(w) and eigenvectors(v) of
# the minor product L.
self.__w,self.__v=la.eig(L)
del L
# STEP 6
# Order the eigenvalues and their corresponding eigenvectors
# in the descending order.
indices=np.argsort(self.__w)
indices=indices[::-1] # descending order
self.__w=self.__w[indices]
self.__v=self.__v[:,indices]
# Calculating the explained variance ratio.
self.evr=self.__w/np.sum(self.__w)
if self.__verbose==True: print "Eigentextures:\tObject created succesfully."
return
def getEigentextures(self,numpc="all"):
# Calculates the eigenvectors of the original covariance matrix
if numpc=='all':
self.__u=np.asarray(np.zeros((self.N,self.M)))
for col in range(self.M):
if self.__verbose==True: print "Calculating eigentexture "+str(col+1)
h=np.dot(self.__Phi,self.__v[:,col])
h=h/la.norm(h)
self.__u[:,col]=h
return self.__u
elif numpc>0 and numpc<=self.M:
numpc=int(numpc+0.5)
self.__u=np.asarray(np.zeros((self.N,numpc)))
for col in range(numpc):
if self.__verbose==True: print "Calculating eigentexture "+str(col+1)
h=np.dot(self.__Phi,self.__v[:,col])
h=h/la.norm(h)
self.__u[:,col]=h
return self.__u
else:
print "Eigentextures:\tInvalid value for numpc."
return
def getEigentexturesEVR(self,variance=1):
# Calculates the eigenvectors of the original covariance matrix
if variance>=1:
self.__u=np.asarray(np.zeros((self.N,self.M)))
for col in range(self.M):
if self.__verbose==True: print "Calculating eigentexture "+str(col+1)
h=np.dot(self.__Phi,self.__v[:,col])
h=h/la.norm(h)
self.__u[:,col]=h
return self.__u
elif variance<1 and variance>0:
cols=np.where(np.cumsum(self.evr)<=variance)[0]
self.__u=np.asarray(np.zeros((self.N,len(cols))))
for col in cols:
if self.__verbose==True: print "Calculating eigentexture "+str(col+1)
h=np.dot(self.__Phi,self.__v[:,col])
h=h/la.norm(h)
self.__u[:,col]=h
return self.__u
else:
print "Eigentextures:\t Invalid explained value ratio parameter."
return
def saveEigentextures2File(self,filename,numpc="all"):
u=self.getEigentextures(numpc)
dumpMatrix2File(u,filename)
return
def saveEVR2File(self,filename,variance=1):
u=self.getEigentexturesEVR(variance)
dumpMatrix2File(u,filename)
return
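# --- Usage sketch (not part of the original file) ---
# A minimal, illustrative use of the Eigentextures class above, assuming
# numpy is imported as np as elsewhere in utils.py. The random data stands
# in for 20 rasterized 32x32 textures, one texture per row.
textures=np.random.randint(0,256,size=(20,32*32))
et=Eigentextures(textures)
u=et.getEigentextures(numpc=5) # first 5 eigentextures, one per column
print u.shape # (1024, 5)
print et.evr[:5] # explained variance ratio of the first 5 components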
#=================================================================================
# PCA
#=================================================================================
class PCA:
'''
This class is a simple implementation of principal component analysis (PCA)
through the computation of the eigenvectors of the covariance matrix of a training
set of samples. For high dimensional vectors, see the class Eigentextures.
Key arguments:
trainingset - Matrix of samples. Each sample corresponds to
a row of the array.
evr - Explained variance ratio: indicates how much of the overall variance
is explained by the corresponding principal component or eigenvector.
numpc - This parameter is used to inform how many principal components the
function should consider.
'''
def __init__(self,trainingset,verbose=False):
self.__verbose=verbose
self.N=trainingset.shape[0] # number of samples/trials
self.M=trainingset.shape[1] # number of dimensions (size of the sample vector)
# STEP 2
# Compute Psi, that is the average vector considering the training set
Psi=trainingset.mean(0)
self.Psi=Psi
# STEP 3
# Subtracts the average from all samples, creating a zero mean
# distribution Phi.
Phi=np.asarray(np.zeros(trainingset.shape))
Phi=trainingset-Psi
if self.__verbose==True: print "PCA:\tPhi created successfully."
# STEP 4
# Computes the covariance matrix.
# covariance=1/((N-1)*trainingset_t*trainingset)
Phi_t=np.transpose(Phi) # M x N matrix
covariance=(np.dot(Phi_t,Phi))/(self.N-1)
self.cov=covariance
self.__w,self.__v=la.eig(covariance)
# The covariance is a positive semi-definite matrix
# and all of its eigenvalues are non-negative.
# However, the linalg.eig function may return small and
# negative eigenvalues. Before calculating the explained variance ratio,
# values below 1e-10 are set to zero.
self.__w=np.where(self.__w<=1e-10,0,self.__w)
# Putting eigenvectors in the descending order of eigenvalues
indices=np.argsort(self.__w)
indices=indices[::-1]
self.__w=self.__w[indices]
self.__v=self.__v[:,indices]
# Calculating the explained variance ratio.
self.evr=self.__w/np.sum(self.__w)
if self.__verbose==True: print "PCA:\tObject created succesfully."
return
def getPC(self,numpc="all"):
# Calculates the eigenvectors of the original covariance matrix
if numpc=='all':
return self.__v
elif numpc>0 and numpc<=self.N:
numpc=int(numpc)
return self.__v[:,0:numpc]
else:
print "PCA:\tInvalid value for numpc."
return
def getEVR(self,variance=1):
# Calculates the eigenvectors of the original covariance matrix
if variance>=1:
return self.__v
elif variance<1 and variance>0:
cols=np.where(np.cumsum(self.evr)<=variance)[0]
return self.__v[:,cols]
else:
print "PCA:\t Invalid explained variance ratio parameter."
return
def savePC2File(self,filename,numpc="all"):
| v=self.getPC(numpc)
dumpMatrix2File(v,filename)
return | identifier_body |
|
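# --- Usage sketch (not part of the original file) ---
# A minimal, illustrative use of the PCA class above on random data,
# assuming numpy is imported as np as elsewhere in utils.py.
# Rows are samples, columns are dimensions.
data=np.random.randn(100,6)
pca=PCA(data)
pcs=pca.getPC(numpc=3) # first 3 principal components, shape (6, 3)
projected=np.dot(data-pca.Psi,pcs) # centre the data and project it
pcs95=pca.getEVR(variance=0.95) # components whose cumulative EVR stays <= 0.95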
utils.py | image[0:ldrows,ldcols:ldcols+image.shape[1]]=image[image.shape[0]-ldrows:image.shape[0],:]
# Fills bottom border
dummyimage[(ldrows+image.shape[0]):,ldcols:(ldcols+image.shape[1])]=image[0:udrows,:]
# Fills left border
dummyimage[ldrows:ldrows+image.shape[0],0:ldcols]=image[:,image.shape[1]-ldcols:]
# Fills right border
dummyimage[ldrows:ldrows+image.shape[0],(ldcols+image.shape[1]):]=image[:,0:udcols]
# Fills top, left corner
dummyimage[0:ldrows,0:ldcols]=image[image.shape[0]-ldrows,image.shape[1]-ldcols]
# Fills bottom, left corner
dummyimage[(ldrows+image.shape[0]):,0:ldcols]=image[0:udrows,(image.shape[1]-ldcols):]
# Fills top, right corner
dummyimage[0:ldrows,(ldcols+image.shape[1]):]=image[(image.shape[0]-ldrows):,0:udcols]
# Fills bottom, right corner
dummyimage[(ldrows+image.shape[0]):,(ldcols+image.shape[1]):]=image[0:udrows,0:udcols]
dummyimage[ldrows:ldrows+image.shape[0],ldcols:ldcols+image.shape[1]]=image
result=np.asarray(np.zeros(shaperesult))
pts[:,0]=pts[:,0]+ldrows
pts[:,1]=pts[:,1]+ldcols
for k in range(len(pts)):
total=0
for i in range(-ldrows,udrows+1):
for j in range(-ldcols,udcols+1):
total=total+dummyimage[i+pts[k,0],j+pts[k,1]]*kernel[i+ldrows,j+ldcols]
result[k]=total
return result
#========================================================================
# cropnscaleImageDB
#========================================================================
def cropnscaleImageDB(imagedb,newimagedb,ox,oy,width,height,scale,folder="",verbose=False):
"""
Applies a crop (region of interest) followed by a scale operation
on a set of images listed on an image database.
The feature points in the image database are modified to reflect
the operations.
Key arguments:
imagedb -- filename/path of the image database
newimagedb -- name of the file that will be created
ox -- x origin of the crop operation
oy -- y origin of the crop operation
width -- width of the region of interest
height -- height of the region of interest
scale -- used to resize the region of interest
folder -- where the images are going to be saved; if not provided,
a new directory is created automatically.
verbose -- If True provides feedback about the images being processed
"""
import procdb
import os
images,shapes,labels=procdb.processImageDB(imagedb)
shapes=np.asarray(shapes)
#print shapes.shape
if verbose==True:
print str(len(images))+" images to process."
suffix="_"+str(int(width*scale))+"x"+str(int(height*scale))
if folder=="":
folder=str(int(width*scale))+"x"+str(int(height*scale))
if not os.path.exists(folder): os.makedirs(folder)
else:
if not os.path.exists(folder):os.makedirs(folder)
newimagedb=open(folder+"/"+newimagedb,'w')
for i in range(len(images)):
im=cv2.imread(images[i])
im_cropped=crop(im,ox,oy,width,height)
newheight=int(height*scale)
newwidth=int(width*scale)
im_resized=np.asarray(np.zeros((newheight,newwidth)))
im_resized=cv2.resize(im_cropped,(newwidth,newheight),im_resized,scale,scale,cv2.INTER_AREA)
fileName, fileExtension = os.path.splitext(images[i])
retval=cv2.imwrite(folder+"/"+fileName+suffix+fileExtension,im_resized)
if retval==False:
print "Problem to save modified image."
return False
shapes[i,:,0]=shapes[i,:,0]-ox
shapes[i,:,1]=shapes[i,:,1]-oy
shapes[i]=shapes[i]*scale
newshapes=''
for j in range(shapes.shape[1]):
newshapes=newshapes+',('+str(shapes[i,j,0])+';'+str(shapes[i,j,1])+')'
newlabels=''
for k in range(len(labels[i])):
newlabels=newlabels+','+str(labels[i][k])
newimagedb.write(fileName+suffix+fileExtension+newlabels+newshapes+'\n')
if verbose==True:
print "Image "+str(i+1)+" successfully processed."
newimagedb.close()
return True
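# --- Usage sketch (not part of the original file) ---
# Illustrative call of cropnscaleImageDB based on its docstring; the file
# names are made up. This crops a 640x480 region starting at (100, 50) from
# every image listed in imagedb.csv and scales the result by 0.5.
ok=cropnscaleImageDB("imagedb.csv","imagedb_small.csv",ox=100,oy=50,width=640,height=480,scale=0.5,verbose=True)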
#=================================================================================
# Eigentextures
#=================================================================================
class Eigentextures:
'''
This class implements principal component analysis (PCA), i.e. the
eigendecomposition of high-dimensional vectors.
It was designed with its use on whole images, parts of images, or
simply textures in mind.
If the images contain faces, the algorithm implemented here is
equivalent to the Eigenfaces algorithm presented by Matthew Turk and
Alex Pentland.
The variable names correspond to those used in the paper
"Eigenfaces for Recognition", Matthew Turk and Alex Pentland, 1991.
Key arguments:
trainingset - Array of rasterized textures. Each sample corresponds to
a row of the array.
evr - Explained variance ratio: indicates how much of the overall variance
is explained by the corresponding principal component or eigenvector.
numpc - This parameter is used to inform how many principal components the
function should consider.
'''
def __init__(self,trainingset,verbose=False):
self.__verbose=verbose
self.M=trainingset.shape[0]
self.N=trainingset.shape[1]
# STEP 1
# Gamma is the matrix whose columns are the rasterized pixels of each image of
# the training set
Gamma=np.transpose(trainingset)
# STEP 2
# Compute Psi, that is the average texture over the training set.
Psi=Gamma.mean(1)
self.Psi=Psi
Psi=(Psi.round()).astype(np.int32)
Psi=np.reshape(Psi,(Psi.shape[0],1))
# STEP 3
# Subtracts the average face from all samples, creating a zero mean
# distribution Phi.
self.__Phi=np.asarray(np.zeros(Gamma.shape),dtype=np.int32)
self.__Phi=Gamma-Psi
del Gamma
del trainingset
if self.__verbose==True: print "Eigentextures:\tPhi created successfully."
# STEP 4
# A minor product of the covariance matrix is calculated.
Phi_t=np.transpose(self.__Phi)
L=np.dot(Phi_t,self.__Phi)
del Phi_t
L=L/self.M
if self.__verbose==True: print "Eigentextures:\tMinor product generated successfully."
# STEP 5
# Calculates the eigenvalues(w) and eigenvectors(v) of
# the minor product L.
self.__w,self.__v=la.eig(L)
del L
# STEP 6
# Order the eigenvalues and their corresponding eigenvectors
# in the descending order.
indices=np.argsort(self.__w)
indices=indices[::-1] # descending order
self.__w=self.__w[indices]
self.__v=self.__v[:,indices]
# Calculating the explained variance ratio.
self.evr=self.__w/np.sum(self.__w)
if self.__verbose==True: print "Eigentextures:\tObject created succesfully."
return
def getEigentextures(self,numpc="all"):
# Calculates the eigenvectors of the original covariance matrix
if numpc=='all':
self.__u=np.asarray(np.zeros((self.N,self.M)))
for col in range(self.M):
if self.__verbose==True: print "Calculating eigentexture "+str(col+1)
h=np.dot(self.__Phi,self.__v[:,col])
h=h/la.norm(h)
self.__u[:,col]=h
return self.__u
elif numpc>0 and numpc<=self.M:
numpc=int(numpc+0.5)
self.__u=np.asarray(np.zeros((self.N,numpc)))
for col in range(numpc):
if self.__verbose==True: print "Calculating eigentexture "+str(col+1)
h=np.dot(self.__Phi,self.__v[:,col])
h=h/la.norm(h)
self.__u[:,col]=h
return self.__u
else:
print "Eigentextures:\tInvalid value for numpc."
return
def | getEigentexturesEVR | identifier_name |
|
utils.py | .asarray(np.zeros(shapes.shape[0]))
ay=np.asarray(np.zeros(shapes.shape[0]))
tx=np.asarray(np.zeros(shapes.shape[0]))
ty=np.asarray(np.zeros(shapes.shape[0]))
# The "while" loop checks the convergence of the alignment.
# The convergence is checked by measuring the difference between the previous
# mean_shape and the last calculated mean shape.
error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape)))
print "error = "+str(error)
while (error>0.0001):
print sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape)))
print 'Iteration ',it
it=it+1
previous_mean_shape=np.copy(mean_shape)
# Normalizing the mean shape to the first shape
axm,aym,txm,tym=alignPairShapes(shapes[0],mean_shape,weights)
mean_shape=RST(mean_shape,axm,aym,txm,tym)
# Align all shapes to the mean shape
for i in range(len(images)):
#print 'Aligning shape '+str(i)
ax[i],ay[i],tx[i],ty[i]=alignPairShapes(mean_shape,shapes[i],weights)
aligned_shapes[i]=RST(shapes[i],ax[i],ay[i],tx[i],ty[i])
# Calculate new mean shape
mean_shape=np.add.reduce(aligned_shapes)/float(aligned_shapes.shape[0])
#print mean_shape
error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape)))
print "error = "+str(error)
if save_aligned_images==True:
for i in range(len(images)):
im=cv2.imread(images[i])
dsize=(im.shape[1],im.shape[0])
T=np.asarray([[ax[i],-ay[i],tx[i]],[ay[i],ax[i],ty[i]]])
im=cv2.warpAffine(im,T,dsize)
fileName, fileExtension = os.path.splitext(os.path.basename(images[i]))
cv2.imwrite(fileName+'_aligned'+fileExtension,im)
return mean_shape,aligned_shapes
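# --- Convergence-check sketch (not part of the original file) ---
# The loop above stops once the L1 difference between the previous and the
# current mean shape drops below 1e-4; the same test written on its own:
converged=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape)))<0.0001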
#==================================================
# dist
#
# Calculates the Euclidean distance between two points
# given their coordinates (x,y) and (u,v)
#==================================================
def dist(x,y,u,v):
dist=np.sqrt(pow((x-u),2)+pow((y-v),2))
return dist
#============================================================
# nearest_point
#
# Given a set of points defined by the sequence of
# coordinates in the vectors 'x' and 'y', the function
# returns two vectors that determine, for each point, the
# index of its nearest point and the distance to it.
# EXAMPLE:
# Consider the set of points:
# P1=(10,20)
# P2=(11,21)
# P3=(100,200)
# The input to the function will be:
# x=([10,11,100])
# y=([20,21,200])
# P2 is the nearest point to P1 and vice-versa.
# P2 is the nearest point to P3.
# So, the function will return:
# indices: [1,0,1]
# distances:[1.41,1.41,199.9]
#
#============================================================
def nearest_point(x,y):
x=np.asarray(x)
y=np.asarray(y)
number_of_points=x.shape[0]
ut1,ut2=np.triu_indices(number_of_points,1) #without the main diagonal
distances=dist(x[ut1[:]],y[ut1[:]],x[ut2[:]],y[ut2[:]])
d_matrix=np.array(np.zeros((number_of_points,number_of_points)))
d_matrix[ut1[:],ut2[:]]=distances[:]
d_matrix[ut2[:],ut1[:]]=distances[:]
d_matrix[range(number_of_points),range(number_of_points)]=np.max(distances)
min_indices=np.array(np.zeros(number_of_points))
min_dist=np.array(np.zeros(number_of_points))
for i in range(number_of_points):
min_indices[i]=int(np.argmin(d_matrix[i,:]))
min_dist[i]=d_matrix[i,np.uint8(min_indices[i])]
print min_indices[i]
return min_indices,min_dist
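# --- Usage sketch (not part of the original file) ---
# Reproduces the example given in the comment block above.
indices,distances=nearest_point([10,11,100],[20,21,200])
# indices -> [1, 0, 1]
# distances -> approximately [1.41, 1.41, 199.90]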
#============================================================
# reticulate
#
# Creates a 1-channel image/array of dimensions h x w pixels
# with a reticulate (grid) that is spaced s pixels apart, with
# lines l pixels wide. (Background is white, grid is black.)
#============================================================
def reticulate(h=302,w=527,s=15,l=2):
ret=np.array(np.zeros((h,w)))
ret=ret+255
for i in range(l):
ret[:,i::s]=0
ret[i::s]=0
return ret
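# --- Usage sketch (not part of the original file) ---
# Builds the default 302x527 grid (lines every 15 px, 2 px wide) and writes
# it to disk; cv2 is already used elsewhere in this module.
grid=reticulate()
retval=cv2.imwrite("reticulate.png",grid)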
#===================================================================
# crop
#
# im -> image (numpy array)
# ox -> column to start crop (column included in cropped image)
# oy -> row to start crop (row included in cropped image)
# width -> of final image
# height -> of final image
#===================================================================
def crop(im,ox,oy,width,height):
cropped_image=im[oy:(oy+height),ox:(ox+width)]
return cropped_image
#========================================================================
# addcoltofile
#
# filename -> the array will be added as a column to this file
# a -> array (will be transformed on a 1d array)
# sep -> separator string
#
#========================================================================
def addcoltofile(filename,a,sep):
a=np.ravel(np.asarray(a))
try:
f=open(filename,'r+')
except IOError:
try:
f=open(filename,'w+')
except IOError:
print "IOError."
#return
line=f.readline()
if line=="":
# File is empty
for i in range(len(a)):
f.write(str(a[i])+'\n')
else:
EOF=False
pointer_to_write=0
pointer_to_read=f.tell()
new_line=line.rstrip('\n')+sep+str(a[0])+'\n'
#print 'new_line= '+new_line
invasion=len(new_line)-len(line)
#print 'size of invasion='+str(invasion)
#print 'pointer_to_write='+str(pointer_to_write)
#print 'pointer_to_read='+str(pointer_to_read)
buf=""
for i in range(1,len(a)+1):
#print EOF
if EOF==False:
aux=f.read(invasion)
buf=buf+aux
#print "Invasion read: "+str(aux)
aux=""
while (aux.find('\n')==-1) and (EOF==False):
aux=f.read(1)
buf=buf+aux
#print 'updated buffer= \n'+buf
if aux=="":
# Reached EOF
EOF=True
#print 'EOF'
break
pointer_to_read=f.tell()
f.seek(pointer_to_write)
f.write(new_line)
pointer_to_write=f.tell()
f.seek(pointer_to_read)
#print 'pointer_to_read='+str(pointer_to_read)
#print 'pointer_to_write='+str(pointer_to_write)
if i<(len(a)):
|
else:
break
f.seek(pointer_to_write)
if f.readline()!="":
print "Attention!The provided array has less elements than\n"
print "the number of lines in the file."
f.close()
return
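# --- Usage sketch (not part of the original file) ---
# Appends the values 1, 2, 3 as a new comma-separated column to an existing
# file; the file name is illustrative.
addcoltofile("results.csv",[1,2,3],",")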
#========================================================================
# visualCheckImageDB
#
# imagedb -> CSV filename
# imagedbtype -> 0 for a complete database (filenames+labels+shape)
# 1 for the simple database (filenames+shape)
# zoom -> to scale image on screen
#
#
#========================================================================
def visualcheckImageDB(imagedb,imagedbtype=0,zoom=0.5):
import procdb
if imagedbtype==0:
images,shape,labels=procdb.processImageDB(imagedb)
else:
images,shape=procdb.processImageDB2(imagedb)
shape=np.asarray(shape)
print shape
for i in range(len(images)):
im=Image.open(images[i])
im=drawPointsOnImage(im,shape[i,:,0],shape[i,:,1])
im=im.resize((int(im.size[0]*zoom+0.5),int(im.size[1]*zoom+0.5)))
print images[i]
im.show()
raw_input('Press ENTER to proceed to next image...')
return
#========================================================================
# dumpMatrix2File
#
# matrix -> numpy 1D or 2D arrays
# filename -> name of the file to be created
#
#
#========================================================================
def dumpMatrix2File(matrix,filename):
| x=buf.find('\n')
line=buf[0:x+1]
#print 'line= '+line
new_line=line.rstrip('\n')+sep+str(a[i])+'\n'
#print 'new_line= '+new_line
invasion=len(new_line)
#print 'size of invasion='+str(invasion)
buf=buf[x+1::]
#print 'buffer without line= \n'+buf | conditional_block |
utils.py | ax=np.asarray(np.zeros(shapes.shape[0]))
ay=np.asarray(np.zeros(shapes.shape[0]))
tx=np.asarray(np.zeros(shapes.shape[0]))
ty=np.asarray(np.zeros(shapes.shape[0]))
# The "while" loop checks the convergence of the alignment.
# The convergence is checked measuring the difference of previous mean_shape
# an the last calculated mean shape.
error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape)))
print "error = "+str(error)
while (error>0.0001):
print sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape)))
print 'Iteration ',it
it=it+1
previous_mean_shape=np.copy(mean_shape)
# Normalizing the mean shape to the first shape
axm,aym,txm,tym=alignPairShapes(shapes[0],mean_shape,weights)
mean_shape=RST(mean_shape,axm,aym,txm,tym)
# Align all shapes to the mean shape
for i in range(len(images)):
#print 'Aligning shape '+str(i)
ax[i],ay[i],tx[i],ty[i]=alignPairShapes(mean_shape,shapes[i],weights)
aligned_shapes[i]=RST(shapes[i],ax[i],ay[i],tx[i],ty[i])
# Calculate new mean shape
mean_shape=np.add.reduce(aligned_shapes)/float(aligned_shapes.shape[0])
#print mean_shape
error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape)))
print "error = "+str(error)
if save_aligned_images==True:
for i in range(len(images)):
im=cv2.imread(images[i])
dsize=(im.shape[1],im.shape[0])
T=np.asarray([[ax[i],-ay[i],tx[i]],[ay[i],ax[i],ty[i]]])
im=cv2.warpAffine(im,T,dsize)
fileName, fileExtension = os.path.splitext(os.path.basename(images[i]))
cv2.imwrite(fileName+'_aligned'+fileExtension,im)
return mean_shape,aligned_shapes
#==================================================
# dist
#
# Calculates the Euclidean distance between two points
# given their coordinates (x,y) and (u,v)
#==================================================
def dist(x,y,u,v):
dist=np.sqrt(pow((x-u),2)+pow((y-v),2))
return dist
#============================================================
# nearest_point
#
# Given a set of points defined by the sequence of
# coordinates in the vectors 'x' and 'y', the function
# returns two vectors that determine, for each point, the
# index of its nearest point and the distance to it.
# EXAMPLE:
# Consider the set of points:
# P1=(10,20)
# P2=(11,21)
# P3=(100,200)
# The input to the function will be:
# x=([10,11,100])
# y=([20,21,200])
# P2 is the nearest point to P1 and vice-versa.
# P2 is the nearest point to P3.
# So, the function will return:
# indices: [1,0,1]
# distances:[1.41,1.41,199.9]
#
#============================================================
def nearest_point(x,y):
x=np.asarray(x)
y=np.asarray(y)
number_of_points=x.shape[0]
ut1,ut2=np.triu_indices(number_of_points,1) #without the main diagonal
distances=dist(x[ut1[:]],y[ut1[:]],x[ut2[:]],y[ut2[:]])
d_matrix=np.array(np.zeros((number_of_points,number_of_points)))
d_matrix[ut1[:],ut2[:]]=distances[:]
d_matrix[ut2[:],ut1[:]]=distances[:]
d_matrix[range(number_of_points),range(number_of_points)]=np.max(distances)
min_indices=np.array(np.zeros(number_of_points))
min_dist=np.array(np.zeros(number_of_points))
for i in range(number_of_points):
min_indices[i]=int(np.argmin(d_matrix[i,:]))
min_dist[i]=d_matrix[i,np.uint8(min_indices[i])]
print min_indices[i]
return min_indices,min_dist
#============================================================
# reticulate
#
# Creates a 1-channel image/array of dimensions h x w pixels
# with a reticulate that is spaced s pixels, with lines l
# pixels large. (Background is white, net is black) |
def reticulate(h=302,w=527,s=15,l=2):
ret=np.array(np.zeros((h,w)))
ret=ret+255
for i in range(l):
ret[:,i::s]=0
ret[i::s]=0
return ret
#===================================================================
# crop
#
# im -> image (numpy array)
# ox -> column to start crop (column included in cropped image)
# oy -> row to start crop (row included in cropped image)
# width -> of final image
# height -> of final image
#===================================================================
def crop(im,ox,oy,width,height):
cropped_image=im[oy:(oy+height),ox:(ox+width)]
return cropped_image
#========================================================================
# addcoltofile
#
# filename -> the array will be added as a column to this file
# a -> array (will be transformed on a 1d array)
# sep -> separator string
#
#========================================================================
def addcoltofile(filename,a,sep):
a=np.ravel(np.asarray(a))
try:
f=open(filename,'r+')
except IOError:
try:
f=open(filename,'w+')
except IOError:
print "IOError."
#return
line=f.readline()
if line=="":
# File is empty
for i in range(len(a)):
f.write(str(a[i])+'\n')
else:
EOF=False
pointer_to_write=0
pointer_to_read=f.tell()
new_line=line.rstrip('\n')+sep+str(a[0])+'\n'
#print 'new_line= '+new_line
invasion=len(new_line)-len(line)
#print 'size of invasion='+str(invasion)
#print 'pointer_to_write='+str(pointer_to_write)
#print 'pointer_to_read='+str(pointer_to_read)
buf=""
for i in range(1,len(a)+1):
#print EOF
if EOF==False:
aux=f.read(invasion)
buf=buf+aux
#print "Invasion read: "+str(aux)
aux=""
while (aux.find('\n')==-1) and (EOF==False):
aux=f.read(1)
buf=buf+aux
#print 'updated buffer= \n'+buf
if aux=="":
# Reached EOF
EOF=True
#print 'EOF'
break
pointer_to_read=f.tell()
f.seek(pointer_to_write)
f.write(new_line)
pointer_to_write=f.tell()
f.seek(pointer_to_read)
#print 'pointer_to_read='+str(pointer_to_read)
#print 'pointer_to_write='+str(pointer_to_write)
if i<(len(a)):
x=buf.find('\n')
line=buf[0:x+1]
#print 'line= '+line
new_line=line.rstrip('\n')+sep+str(a[i])+'\n'
#print 'new_line= '+new_line
invasion=len(new_line)
#print 'size of invasion='+str(invasion)
buf=buf[x+1::]
#print 'buffer without line= \n'+buf
else:
break
f.seek(pointer_to_write)
if f.readline()!="":
print "Attention!The provided array has less elements than\n"
print "the number of lines in the file."
f.close()
return
#========================================================================
# visualCheckImageDB
#
# imagedb -> CSV filename
# imagedbtype -> 0 for a complete database (filenames+labels+shape)
# 1 for the simple database (filenames+shape)
# zoom -> to scale image on screen
#
#
#========================================================================
def visualcheckImageDB(imagedb,imagedbtype=0,zoom=0.5):
import procdb
if imagedbtype==0:
images,shape,labels=procdb.processImageDB(imagedb)
else:
images,shape=procdb.processImageDB2(imagedb)
shape=np.asarray(shape)
print shape
for i in range(len(images)):
im=Image.open(images[i])
im=drawPointsOnImage(im,shape[i,:,0],shape[i,:,1])
im=im.resize((int(im.size[0]*zoom+0.5),int(im.size[1]*zoom+0.5)))
print images[i]
im.show()
raw_input('Press ENTER to proceed to next image...')
return
#========================================================================
# dumpMatrix2File
#
# matrix -> numpy 1D or 2D arrays
# filename -> name of the file to be created
#
#
#========================================================================
def dumpMatrix2File(matrix,filename | #============================================================ | random_line_split |
main.rs | InMemoryConfigBuilder},
InMemoryCache,
},
gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
gateway::shard::Event,
http::Client as HttpClient,
model::{
channel::{Channel, Message},
gateway::GatewayIntents,
id::{ChannelId, GuildId, UserId},
user::CurrentUser,
},
};
mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;
use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};
#[tokio::main]
async fn main() -> Result<()> {
dotenv::dotenv().ok();
let token = env::var("DISCORD_TOKEN")?;
// This is also the default.
let scheme = ShardScheme::Auto;
let config = ClusterConfig::builder(&token)
.shard_scheme(scheme)
// Use intents to only listen to GUILD_MESSAGES events
.intents(Some(
GatewayIntents::GUILD_MESSAGES
| GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::GUILD_MESSAGE_REACTIONS,
))
.build();
// Start up the cluster
let cluster = Cluster::new(config);
cluster.up().await?;
// The http client is separate from the gateway,
// so start up a new one
let http = HttpClient::new(&token);
// Since we only care about messages and reactions, make
// the cache only cache message and reaction related events
let cache_config = InMemoryConfigBuilder::new()
.event_types(
EventType::MESSAGE_CREATE
| EventType::MESSAGE_DELETE
| EventType::MESSAGE_DELETE_BULK
| EventType::MESSAGE_UPDATE
| EventType::REACTION_ADD
| EventType::REACTION_REMOVE,
)
.build();
let cache = InMemoryCache::from(cache_config);
let mut events = cluster.events().await;
let current_user = http.current_user().await?;
// Run the event loop, handling each event from the event stream
while let Some(event) = events.next().await {
// Update the cache
cache.update(&event.1).await.expect("Cache failed, OhNoe!");
// Spawn a new task to handle the event
handle_event(event, http.clone(), ¤t_user).await?;
}
Ok(())
}
/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> {
match http.channel(channel_id).await?.unwrap() {
Channel::Private(_) => Ok(true),
_ => Ok(false)
}
}
async fn handle_event(
event: (u64, Event),
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
match event {
(_, Event::MessageCreate(msg)) => {
// Don't send replies to yourself
if msg.author.id != current_user.id {
if is_pm(&http, msg.channel_id).await? {
handle_pm(&msg, &http).await?;
}
else {
handle_potential_command(&msg, http, current_user)
.await?;
}
}
}
(_, Event::ReactionAdd(reaction)) => {
if !is_pm(&http, reaction.channel_id).await? {
handle_reaction_add(&reaction, http, ¤t_user).await?;
}
}
(_, Event::ReactionRemove(reaction)) => {
if !is_pm(&http, reaction.channel_id).await? {
handle_reaction_remove(&reaction, http).await?;
}
}
(id, Event::ShardConnected(_)) => {
println!("Connected on shard {}", id);
}
_ => {}
}
Ok(())
}
async fn handle_pm(
msg: &Message,
http: &HttpClient,
) -> Result<()> {
handle_add_theme(http, msg).await?;
Ok(())
}
async fn handle_potential_command(
msg: &Message,
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
let mut words = msg.content.split_ascii_whitespace();
match words.next() {
Some("!help") => {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to call for help in non-guild"),
).await?;
}
Some("!createchannels") => {
handle_create_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to create channels in non-guild"),
msg.author.id,
current_user.id,
http
).await?;
},
Some("!renamechannels") => {
handle_rename_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.author.id,
current_user.id,
http
).await?;
},
Some("!removechannels") => {
handle_remove_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to remove channels in non-guild"),
msg.author.id,
http
).await?;
},
Some("!clearassociations") => {
handle_clear_channel_associations(
msg.channel_id,
msg.guild_id.expect("Tried to clear channel associations in non-guild"),
msg.author.id,
http,
).await?;
}
Some("!role") => {
handle_give_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to get role in non-guild"),
&msg.author,
http
).await?;
},
Some("!leave") => {
handle_remove_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to leave role in non-guild"),
&msg.author,
http
).await?;
},
Some("!generatetheme") => {
handle_generate_theme(
msg.channel_id,
msg.guild_id.expect("Tried to generate theme in non-guild"),
&msg.author,
http
).await?;
}
Some("!showallthemes") => {
handle_show_all_themes(
msg.channel_id,
msg.guild_id.expect("Tried to show all themes in non-guild"),
&msg.author,
http
).await?;
}
Some("!showthemecount") => |
Some("!setroleassign") => {
handle_set_reaction_message(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to set role assignment message in non-guild"),
&msg.author,
http,
msg,
ReactionMessageType::RoleAssign,
).await?;
}
Some(s) if s.chars().next() == Some('!') => {
send_message(&http, msg.channel_id, msg.author.id,
format!("Unrecognised command `{}`.", s)
).await?;
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to issue a command in non-guild"),
).await?;
}
// Not a command and probably not for us
Some(_) => {
// Check if we were mentioned
if msg.mentions.contains_key(¤t_user.id) {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to mention us in non-guild"),
).await?;
}
}
None => {}
}
Ok(())
}
async fn send_help_message(
http: HttpClient,
channel_id: ChannelId,
user_id: UserId,
guild_id: GuildId,
) -> Result<()> {
let standard_message =
//"Send me a PM to submit theme ideas.\n\n\
"Get a role to signify one of your skill sets with the command `!role <role name>`\n\
and leave a role with `!leave <role name>`.\n\n\
You can also ask for text and voice channels for your game \
with the command `!createchannels <game name>`\n\
and rename them with `!renamechannels <new game name>`.";
let organizer_message = format!(
"Since you have the **{}** role, you also have access to the \
following commands:\n\
- `!generatetheme` to generate a theme.\n\
- `!showallthemes` to view all the theme ideas that have been submitted.\n\
- `!showthemecount` to see the number of theme ideas that have been submitted.\n\
- `!removechannels <mention of user>` to remove a user's created channel.\n\
- `!clearassociations` to clear all user–channel associations.\n\
- `!setroleassign <mention of channel with the message> <message ID>` to \
set the server's role assignment message.", ORGANIZER
| {
handle_show_theme_count(
msg.channel_id,
msg.guild_id.expect("Tried to show theme idea count in non-guild"),
&msg.author,
http
).await?;
} | conditional_block |
main.rs | InMemoryConfigBuilder},
InMemoryCache,
},
gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
gateway::shard::Event,
http::Client as HttpClient,
model::{
channel::{Channel, Message},
gateway::GatewayIntents,
id::{ChannelId, GuildId, UserId},
user::CurrentUser,
},
};
mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;
use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};
#[tokio::main]
async fn main() -> Result<()> {
dotenv::dotenv().ok();
let token = env::var("DISCORD_TOKEN")?;
// This is also the default.
let scheme = ShardScheme::Auto;
let config = ClusterConfig::builder(&token)
.shard_scheme(scheme)
// Use intents to only listen to GUILD_MESSAGES events
.intents(Some(
GatewayIntents::GUILD_MESSAGES
| GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::GUILD_MESSAGE_REACTIONS,
))
.build();
// Start up the cluster
let cluster = Cluster::new(config);
cluster.up().await?;
// The http client is separate from the gateway,
// so start up a new one
let http = HttpClient::new(&token);
// Since we only care about messages and reactions, make
// the cache only cache message and reaction related events
let cache_config = InMemoryConfigBuilder::new()
.event_types(
EventType::MESSAGE_CREATE
| EventType::MESSAGE_DELETE
| EventType::MESSAGE_DELETE_BULK
| EventType::MESSAGE_UPDATE
| EventType::REACTION_ADD
| EventType::REACTION_REMOVE,
)
.build();
let cache = InMemoryCache::from(cache_config);
let mut events = cluster.events().await;
let current_user = http.current_user().await?;
// Run the event loop, handling each event from the event stream
while let Some(event) = events.next().await {
// Update the cache
cache.update(&event.1).await.expect("Cache failed, OhNoe!");
// Spawn a new task to handle the event
handle_event(event, http.clone(), ¤t_user).await?;
}
Ok(())
}
/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> {
match http.channel(channel_id).await?.unwrap() {
Channel::Private(_) => Ok(true),
_ => Ok(false)
}
}
async fn handle_event(
event: (u64, Event),
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
match event {
(_, Event::MessageCreate(msg)) => {
// Don't send replies to yourself
if msg.author.id != current_user.id {
if is_pm(&http, msg.channel_id).await? {
handle_pm(&msg, &http).await?;
}
else {
handle_potential_command(&msg, http, current_user)
.await?;
}
}
}
(_, Event::ReactionAdd(reaction)) => {
if !is_pm(&http, reaction.channel_id).await? {
handle_reaction_add(&reaction, http, ¤t_user).await?;
}
}
(_, Event::ReactionRemove(reaction)) => {
if !is_pm(&http, reaction.channel_id).await? {
handle_reaction_remove(&reaction, http).await?;
}
}
(id, Event::ShardConnected(_)) => {
println!("Connected on shard {}", id);
}
_ => {}
}
Ok(())
}
async fn handle_pm(
msg: &Message,
http: &HttpClient,
) -> Result<()> {
handle_add_theme(http, msg).await?;
Ok(())
}
async fn handle_potential_command(
msg: &Message,
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
let mut words = msg.content.split_ascii_whitespace();
match words.next() {
Some("!help") => {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to call for help in non-guild"),
).await?;
}
Some("!createchannels") => {
handle_create_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to create channels in non-guild"),
msg.author.id,
current_user.id,
http
).await?;
},
Some("!renamechannels") => {
handle_rename_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.author.id,
current_user.id,
http
).await?;
},
Some("!removechannels") => {
handle_remove_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to remove channels in non-guild"),
msg.author.id,
http
).await?;
},
Some("!clearassociations") => {
handle_clear_channel_associations(
msg.channel_id,
msg.guild_id.expect("Tried to clear channel associations in non-guild"),
msg.author.id,
http,
).await?;
}
Some("!role") => {
handle_give_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to get role in non-guild"),
&msg.author,
http
).await?;
},
Some("!leave") => {
handle_remove_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to leave role in non-guild"),
&msg.author,
http
).await?;
},
Some("!generatetheme") => {
handle_generate_theme(
msg.channel_id,
msg.guild_id.expect("Tried to generate theme in non-guild"),
&msg.author,
http
).await?;
}
Some("!showallthemes") => {
handle_show_all_themes(
msg.channel_id,
msg.guild_id.expect("Tried to show all themes in non-guild"),
&msg.author,
http
).await?;
}
Some("!showthemecount") => {
handle_show_theme_count(
msg.channel_id,
msg.guild_id.expect("Tried to show theme idea count in non-guild"),
&msg.author,
http
).await?;
}
Some("!setroleassign") => {
handle_set_reaction_message(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to set role assignment message in non-guild"),
&msg.author,
http,
msg,
ReactionMessageType::RoleAssign,
).await?;
}
Some(s) if s.chars().next() == Some('!') => {
send_message(&http, msg.channel_id, msg.author.id,
format!("Unrecognised command `{}`.", s)
).await?;
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to issue a command in non-guild"),
).await?;
}
// Not a command and probably not for us
Some(_) => {
// Check if we were mentioned
if msg.mentions.contains_key(¤t_user.id) {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to mention us in non-guild"),
).await?;
}
}
None => {}
}
Ok(())
}
async fn | (
http: HttpClient,
channel_id: ChannelId,
user_id: UserId,
guild_id: GuildId,
) -> Result<()> {
let standard_message =
//"Send me a PM to submit theme ideas.\n\n\
"Get a role to signify one of your skill sets with the command `!role <role name>`\n\
and leave a role with `!leave <role name>`.\n\n\
You can also ask for text and voice channels for your game \
with the command `!createchannels <game name>`\n\
and rename them with `!renamechannels <new game name>`.";
let organizer_message = format!(
"Since you have the **{}** role, you also have access to the \
following commands:\n\
- `!generatetheme` to generate a theme.\n\
- `!showallthemes` to view all the theme ideas that have been submitted.\n\
- `!showthemecount` to see the number of theme ideas that have been submitted.\n\
- `!removechannels <mention of user>` to remove a user's created channel.\n\
- `!clearassociations` to clear all user–channel associations.\n\
- `!setroleassign <mention of channel with the message> <message ID>` to \
set the server's role assignment message.", ORGANIZER
);
| send_help_message | identifier_name |
main.rs | InMemoryConfigBuilder},
InMemoryCache,
},
gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
gateway::shard::Event,
http::Client as HttpClient,
model::{
channel::{Channel, Message},
gateway::GatewayIntents,
id::{ChannelId, GuildId, UserId},
user::CurrentUser,
},
};
mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;
use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};
#[tokio::main]
async fn main() -> Result<()> {
dotenv::dotenv().ok();
let token = env::var("DISCORD_TOKEN")?;
// This is also the default.
let scheme = ShardScheme::Auto;
let config = ClusterConfig::builder(&token)
.shard_scheme(scheme)
// Use intents to only listen to GUILD_MESSAGES events
.intents(Some(
GatewayIntents::GUILD_MESSAGES
| GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::GUILD_MESSAGE_REACTIONS,
))
.build();
// Start up the cluster
let cluster = Cluster::new(config);
cluster.up().await?;
// The http client is separate from the gateway,
// so start up a new one
let http = HttpClient::new(&token);
// Since we only care about messages and reactions, make
// the cache only cache message and reaction related events
let cache_config = InMemoryConfigBuilder::new()
.event_types(
EventType::MESSAGE_CREATE
| EventType::MESSAGE_DELETE
| EventType::MESSAGE_DELETE_BULK
| EventType::MESSAGE_UPDATE
| EventType::REACTION_ADD
| EventType::REACTION_REMOVE,
)
.build();
let cache = InMemoryCache::from(cache_config);
let mut events = cluster.events().await;
let current_user = http.current_user().await?;
// Run the event loop, handling each event from the event stream
while let Some(event) = events.next().await {
// Update the cache
cache.update(&event.1).await.expect("Cache failed, OhNoe!");
// Spawn a new task to handle the event
handle_event(event, http.clone(), ¤t_user).await?;
}
Ok(())
}
/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> |
async fn handle_event(
event: (u64, Event),
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
match event {
(_, Event::MessageCreate(msg)) => {
// Don't send replies to yourself
if msg.author.id != current_user.id {
if is_pm(&http, msg.channel_id).await? {
handle_pm(&msg, &http).await?;
}
else {
handle_potential_command(&msg, http, current_user)
.await?;
}
}
}
(_, Event::ReactionAdd(reaction)) => {
if !is_pm(&http, reaction.channel_id).await? {
handle_reaction_add(&reaction, http, ¤t_user).await?;
}
}
(_, Event::ReactionRemove(reaction)) => {
if !is_pm(&http, reaction.channel_id).await? {
handle_reaction_remove(&reaction, http).await?;
}
}
(id, Event::ShardConnected(_)) => {
println!("Connected on shard {}", id);
}
_ => {}
}
Ok(())
}
async fn handle_pm(
msg: &Message,
http: &HttpClient,
) -> Result<()> {
handle_add_theme(http, msg).await?;
Ok(())
}
async fn handle_potential_command(
msg: &Message,
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
let mut words = msg.content.split_ascii_whitespace();
match words.next() {
Some("!help") => {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to call for help in non-guild"),
).await?;
}
Some("!createchannels") => {
handle_create_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to create channels in non-guild"),
msg.author.id,
current_user.id,
http
).await?;
},
Some("!renamechannels") => {
handle_rename_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.author.id,
current_user.id,
http
).await?;
},
Some("!removechannels") => {
handle_remove_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to remove channels in non-guild"),
msg.author.id,
http
).await?;
},
Some("!clearassociations") => {
handle_clear_channel_associations(
msg.channel_id,
msg.guild_id.expect("Tried to clear channel associations in non-guild"),
msg.author.id,
http,
).await?;
}
Some("!role") => {
handle_give_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to get role in non-guild"),
&msg.author,
http
).await?;
},
Some("!leave") => {
handle_remove_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to leave role in non-guild"),
&msg.author,
http
).await?;
},
Some("!generatetheme") => {
handle_generate_theme(
msg.channel_id,
msg.guild_id.expect("Tried to generate theme in non-guild"),
&msg.author,
http
).await?;
}
Some("!showallthemes") => {
handle_show_all_themes(
msg.channel_id,
msg.guild_id.expect("Tried to show all themes in non-guild"),
&msg.author,
http
).await?;
}
Some("!showthemecount") => {
handle_show_theme_count(
msg.channel_id,
msg.guild_id.expect("Tried to show theme idea count in non-guild"),
&msg.author,
http
).await?;
}
Some("!setroleassign") => {
handle_set_reaction_message(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to set role assignment message in non-guild"),
&msg.author,
http,
msg,
ReactionMessageType::RoleAssign,
).await?;
}
Some(s) if s.chars().next() == Some('!') => {
send_message(&http, msg.channel_id, msg.author.id,
format!("Unrecognised command `{}`.", s)
).await?;
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to issue a command in non-guild"),
).await?;
}
// Not a command and probably not for us
Some(_) => {
// Check if we were mentioned
if msg.mentions.contains_key(¤t_user.id) {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to mention us in non-guild"),
).await?;
}
}
None => {}
}
Ok(())
}
async fn send_help_message(
http: HttpClient,
channel_id: ChannelId,
user_id: UserId,
guild_id: GuildId,
) -> Result<()> {
let standard_message =
//"Send me a PM to submit theme ideas.\n\n\
"Get a role to signify one of your skill sets with the command `!role <role name>`\n\
and leave a role with `!leave <role name>`.\n\n\
You can also ask for text and voice channels for your game \
with the command `!createchannels <game name>`\n\
and rename them with `!renamechannels <new game name>`.";
let organizer_message = format!(
"Since you have the **{}** role, you also have access to the \
following commands:\n\
- `!generatetheme` to generate a theme.\n\
- `!showallthemes` to view all the theme ideas that have been submitted.\n\
- `!showthemecount` to see the number of theme ideas that have been submitted.\n\
- `!removechannels <mention of user>` to remove a user's created channel.\n\
- `!clearassociations` to clear all user–channel associations.\n\
- `!setroleassign <mention of channel with the message> <message ID>` to \
set the server's role assignment message.", ORGANIZER
| {
match http.channel(channel_id).await?.unwrap() {
Channel::Private(_) => Ok(true),
_ => Ok(false)
}
} | identifier_body |
main.rs | ::{Channel, Message},
gateway::GatewayIntents,
id::{ChannelId, GuildId, UserId},
user::CurrentUser,
},
};
mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;
use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};
#[tokio::main]
async fn main() -> Result<()> {
dotenv::dotenv().ok();
let token = env::var("DISCORD_TOKEN")?;
// This is also the default.
let scheme = ShardScheme::Auto;
let config = ClusterConfig::builder(&token)
.shard_scheme(scheme)
// Use intents to only listen to GUILD_MESSAGES events
.intents(Some(
GatewayIntents::GUILD_MESSAGES
| GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::GUILD_MESSAGE_REACTIONS,
))
.build();
// Start up the cluster
let cluster = Cluster::new(config);
cluster.up().await?;
// The http client is separate from the gateway,
// so start up a new one
let http = HttpClient::new(&token);
// Since we only care about messages and reactions, make
// the cache only cache message and reaction related events
let cache_config = InMemoryConfigBuilder::new()
.event_types(
EventType::MESSAGE_CREATE
| EventType::MESSAGE_DELETE
| EventType::MESSAGE_DELETE_BULK
| EventType::MESSAGE_UPDATE
| EventType::REACTION_ADD
| EventType::REACTION_REMOVE,
)
.build();
let cache = InMemoryCache::from(cache_config);
let mut events = cluster.events().await;
let current_user = http.current_user().await?;
// Run the event loop, handling each event from the event stream
while let Some(event) = events.next().await {
// Update the cache
cache.update(&event.1).await.expect("Cache failed, OhNoe!");
// Spawn a new task to handle the event
handle_event(event, http.clone(), ¤t_user).await?;
}
Ok(())
}
/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> {
match http.channel(channel_id).await?.unwrap() {
Channel::Private(_) => Ok(true),
_ => Ok(false)
}
}
async fn handle_event(
event: (u64, Event),
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
match event {
(_, Event::MessageCreate(msg)) => {
// Don't send replies to yourself
if msg.author.id != current_user.id {
if is_pm(&http, msg.channel_id).await? {
handle_pm(&msg, &http).await?;
}
else {
handle_potential_command(&msg, http, current_user)
.await?;
}
}
}
(_, Event::ReactionAdd(reaction)) => {
if !is_pm(&http, reaction.channel_id).await? {
handle_reaction_add(&reaction, http, ¤t_user).await?;
}
}
(_, Event::ReactionRemove(reaction)) => {
if !is_pm(&http, reaction.channel_id).await? {
handle_reaction_remove(&reaction, http).await?;
}
}
(id, Event::ShardConnected(_)) => {
println!("Connected on shard {}", id);
}
_ => {}
}
Ok(())
}
async fn handle_pm(
msg: &Message,
http: &HttpClient,
) -> Result<()> {
handle_add_theme(http, msg).await?;
Ok(())
}
async fn handle_potential_command(
msg: &Message,
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
let mut words = msg.content.split_ascii_whitespace();
match words.next() {
Some("!help") => {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to call for help in non-guild"),
).await?;
}
Some("!createchannels") => {
handle_create_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to create channels in non-guild"),
msg.author.id,
current_user.id,
http
).await?;
},
Some("!renamechannels") => {
handle_rename_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.author.id,
current_user.id,
http
).await?;
},
Some("!removechannels") => {
handle_remove_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to remove channels in non-guild"),
msg.author.id,
http
).await?;
},
Some("!clearassociations") => {
handle_clear_channel_associations(
msg.channel_id,
msg.guild_id.expect("Tried to clear channel associations in non-guild"),
msg.author.id,
http,
).await?;
}
Some("!role") => {
handle_give_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to get role in non-guild"),
&msg.author,
http
).await?;
},
Some("!leave") => {
handle_remove_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to leave role in non-guild"),
&msg.author,
http
).await?;
},
Some("!generatetheme") => {
handle_generate_theme(
msg.channel_id,
msg.guild_id.expect("Tried to generate theme in non-guild"),
&msg.author,
http
).await?;
}
Some("!showallthemes") => {
handle_show_all_themes(
msg.channel_id,
msg.guild_id.expect("Tried to show all themes in non-guild"),
&msg.author,
http
).await?;
}
Some("!showthemecount") => {
handle_show_theme_count(
msg.channel_id,
msg.guild_id.expect("Tried to show theme idea count in non-guild"),
&msg.author,
http
).await?;
}
Some("!setroleassign") => {
handle_set_reaction_message(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to set role assignment message in non-guild"),
&msg.author,
http,
msg,
ReactionMessageType::RoleAssign,
).await?;
}
Some(s) if s.chars().next() == Some('!') => {
send_message(&http, msg.channel_id, msg.author.id,
format!("Unrecognised command `{}`.", s)
).await?;
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to issue a command in non-guild"),
).await?;
}
// Not a command and probably not for us
Some(_) => {
// Check if we were mentioned
if msg.mentions.contains_key(¤t_user.id) {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to mention us in non-guild"),
).await?;
}
}
None => {}
}
Ok(())
}
async fn send_help_message(
http: HttpClient,
channel_id: ChannelId,
user_id: UserId,
guild_id: GuildId,
) -> Result<()> {
let standard_message =
//"Send me a PM to submit theme ideas.\n\n\
"Get a role to signify one of your skill sets with the command `!role <role name>`\n\
and leave a role with `!leave <role name>`.\n\n\
You can also ask for text and voice channels for your game \
with the command `!createchannels <game name>`\n\
and rename them with `!renamechannels <new game name>`.";
let organizer_message = format!(
"Since you have the **{}** role, you also have access to the \
following commands:\n\
- `!generatetheme` to generate a theme.\n\
- `!showallthemes` to view all the theme ideas that have been submitted.\n\
- `!showthemecount` to see the number of theme ideas that have been submitted.\n\
- `!removechannels <mention of user>` to remove a user's created channel.\n\
- `!clearassociations` to clear all user–channel associations.\n\
- `!setroleassign <mention of channel with the message> <message ID>` to \
set the server's role assignment message.", ORGANIZER
);
let help_message = | if has_role(&http, guild_id, user_id, ORGANIZER).await? {
format!("{}\n\n{}", standard_message, organizer_message)
}
else {
standard_message.to_string() | random_line_split |
|
instance.py | ,
expiration: datetime.datetime,
enable_iam_auth: bool,
) -> None:
self.ip_addrs = ip_addrs
self.database_version = database_version
self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# update ssl.PROTOCOL_TLS_CLIENT default
self.context.check_hostname = False
# verify OpenSSL version supports TLSv1.3
if ssl.HAS_TLSv1_3:
# force TLSv1.3 if supported by client
self.context.minimum_version = ssl.TLSVersion.TLSv1_3
# fallback to TLSv1.2 for older versions of OpenSSL
else:
if enable_iam_auth:
raise TLSVersionError(
f"Your current version of OpenSSL ({ssl.OPENSSL_VERSION}) does not "
"support TLSv1.3, which is required to use IAM Authentication.\n"
"Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support."
)
logger.warning(
"TLSv1.3 is not supported with your version of OpenSSL "
f"({ssl.OPENSSL_VERSION}), falling back to TLSv1.2\n"
"Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support."
)
self.context.minimum_version = ssl.TLSVersion.TLSv1_2
self.expiration = expiration
# tmpdir and its contents are automatically deleted after the CA cert
# and ephemeral cert are loaded into the SSLcontext. The values
# need to be written to files in order to be loaded by the SSLContext
with TemporaryDirectory() as tmpdir:
ca_filename, cert_filename, key_filename = write_to_file(
tmpdir, server_ca_cert, ephemeral_cert, private_key
)
self.context.load_cert_chain(cert_filename, keyfile=key_filename)
self.context.load_verify_locations(cafile=ca_filename)
def get_preferred_ip(self, ip_type: IPTypes) -> str:
"""Returns the first IP address for the instance, according to the preference
supplied by ip_type. If no IP addressess with the given preference are found,
an error is raised."""
if ip_type.value in self.ip_addrs:
return self.ip_addrs[ip_type.value]
raise CloudSQLIPTypeError(
"Cloud SQL instance does not have any IP addresses matching "
f"preference: {ip_type.value})"
)
class Instance:
"""A class to manage the details of the connection to a Cloud SQL
instance, including refreshing the credentials.
:param instance_connection_string:
The Google Cloud SQL Instance's connection
string.
:type instance_connection_string: str
:param user_agent_string:
The user agent string to append to SQLAdmin API requests
:type user_agent_string: str
:type credentials: google.auth.credentials.Credentials
:param credentials
Credentials object used to authenticate connections to Cloud SQL server.
If not specified, Application Default Credentials are used.
:param enable_iam_auth
Enables automatic IAM database authentication for Postgres or MySQL
instances.
:type enable_iam_auth: bool
:param loop:
A new event loop for the refresh function to run in.
:type loop: asyncio.AbstractEventLoop
:type quota_project: str
:param quota_project
The Project ID for an existing Google Cloud project. The project specified
is used for quota and billing purposes. If not specified, defaults to
project sourced from environment.
:type sqladmin_api_endpoint: str
:param sqladmin_api_endpoint:
Base URL to use when calling the Cloud SQL Admin API endpoint.
Defaults to "https://sqladmin.googleapis.com", this argument should
only be used in development.
"""
# asyncio.AbstractEventLoop is used because the default loop,
# SelectorEventLoop, is usable on both Unix and Windows but has limited
# functionality on Windows. It is recommended to use ProactorEventLoop
# while developing on Windows.
# Link to Github issue:
# https://github.com/GoogleCloudPlatform/cloud-sql-python-connector/issues/22
_loop: asyncio.AbstractEventLoop
_enable_iam_auth: bool
__client_session: Optional[aiohttp.ClientSession] = None
@property
def _client_session(self) -> aiohttp.ClientSession:
if self.__client_session is None:
headers = {
"x-goog-api-client": self._user_agent_string,
"User-Agent": self._user_agent_string,
"Content-Type": "application/json",
}
if self._quota_project:
headers["x-goog-user-project"] = self._quota_project
self.__client_session = aiohttp.ClientSession(headers=headers)
return self.__client_session
_credentials: Optional[Credentials] = None
_keys: asyncio.Future
_instance_connection_string: str
_user_agent_string: str
_sqladmin_api_endpoint: str
_instance: str
_project: str
_region: str
_refresh_rate_limiter: AsyncRateLimiter
_refresh_in_progress: asyncio.locks.Event
_current: asyncio.Task # task wraps coroutine that returns InstanceMetadata
_next: asyncio.Task # task wraps coroutine that returns another task
def __init__(
self,
instance_connection_string: str,
driver_name: str,
keys: asyncio.Future,
loop: asyncio.AbstractEventLoop,
credentials: Optional[Credentials] = None,
enable_iam_auth: bool = False,
quota_project: str = None,
sqladmin_api_endpoint: str = "https://sqladmin.googleapis.com",
) -> None:
# Validate connection string
connection_string_split = instance_connection_string.split(":")
if len(connection_string_split) == 3:
self._instance_connection_string = instance_connection_string
self._project = connection_string_split[0]
self._region = connection_string_split[1]
self._instance = connection_string_split[2]
else:
raise ValueError(
"Arg `instance_connection_string` must have "
"format: PROJECT:REGION:INSTANCE, "
f"got {instance_connection_string}."
)
self._enable_iam_auth = enable_iam_auth
self._user_agent_string = f"{APPLICATION_NAME}/{version}+{driver_name}"
self._quota_project = quota_project
self._sqladmin_api_endpoint = sqladmin_api_endpoint
self._loop = loop
self._keys = keys
# validate credentials type
if not isinstance(credentials, Credentials) and credentials is not None:
raise CredentialsTypeError(
"Arg credentials must be type 'google.auth.credentials.Credentials' "
"or None (to use Application Default Credentials)"
)
self._credentials = _auth_init(credentials)
self._refresh_rate_limiter = AsyncRateLimiter(
max_capacity=2, rate=1 / 30, loop=self._loop
)
self._refresh_in_progress = asyncio.locks.Event()
self._current = self._schedule_refresh(0)
self._next = self._current
async def | (self) -> None:
"""
Forces a new refresh attempt immediately to be used for future connection attempts.
"""
# if next refresh is not already in progress, cancel it and schedule new one immediately
if not self._refresh_in_progress.is_set():
self._next.cancel()
self._next = self._schedule_refresh(0)
# block all sequential connection attempts on the next refresh result if current is invalid
if not await _is_valid(self._current):
self._current = self._next
async def _perform_refresh(self) -> InstanceMetadata:
"""Retrieves instance metadata and ephemeral certificate from the
Cloud SQL Instance.
:rtype: InstanceMetadata
:returns: A dataclass containing a string representing the ephemeral certificate, a dict
containing the instances IP adresses, a string representing a PEM-encoded private key
and a string representing a PEM-encoded certificate authority.
"""
self._refresh_in_progress.set()
logger.debug(
f"['{self._instance_connection_string}']: Entered _perform_refresh"
)
try:
await self._refresh_rate_limiter.acquire()
priv_key, pub_key = await self._keys
logger.debug(f"['{self._instance_connection_string}']: Creating context")
metadata_task = self._loop.create_task(
_get_metadata(
self._client_session,
self._sqladmin_api_endpoint,
self._credentials,
self._project,
self._region,
self._instance,
)
)
ephemeral_task = self._loop.create_task(
_get_ephemeral(
self._client_session,
self._sqladmin_api_endpoint,
self._credentials,
self._project,
self._instance,
pub_key,
self._enable_iam_auth,
)
)
try:
metadata = await metadata_task
# check if automatic IAM database authn is supported for database engine
if self._enable_iam_auth and not metadata[
"database_version"
].startswith(("POSTGRES", "MYSQL")):
raise AutoIAMAuthNotSupported(
f"'{metadata['database_version']}' does not support automatic IAM authentication. It is only supported with Cloud SQL Postgres or MySQL instances."
)
except Exception:
# cancel ephemeral cert task if exception occurs | force_refresh | identifier_name |
instance.py | IP addressess with the given preference are found,
an error is raised."""
if ip_type.value in self.ip_addrs:
return self.ip_addrs[ip_type.value]
raise CloudSQLIPTypeError(
"Cloud SQL instance does not have any IP addresses matching "
f"preference: {ip_type.value})"
)
class Instance:
"""A class to manage the details of the connection to a Cloud SQL
instance, including refreshing the credentials.
:param instance_connection_string:
The Google Cloud SQL Instance's connection
string.
:type instance_connection_string: str
:param user_agent_string:
The user agent string to append to SQLAdmin API requests
:type user_agent_string: str
:type credentials: google.auth.credentials.Credentials
:param credentials
Credentials object used to authenticate connections to Cloud SQL server.
If not specified, Application Default Credentials are used.
:param enable_iam_auth
Enables automatic IAM database authentication for Postgres or MySQL
instances.
:type enable_iam_auth: bool
:param loop:
A new event loop for the refresh function to run in.
:type loop: asyncio.AbstractEventLoop
:type quota_project: str
:param quota_project
The Project ID for an existing Google Cloud project. The project specified
is used for quota and billing purposes. If not specified, defaults to
project sourced from environment.
:type sqladmin_api_endpoint: str
:param sqladmin_api_endpoint:
Base URL to use when calling the Cloud SQL Admin API endpoint.
Defaults to "https://sqladmin.googleapis.com", this argument should
only be used in development.
"""
# asyncio.AbstractEventLoop is used because the default loop,
# SelectorEventLoop, is usable on both Unix and Windows but has limited
# functionality on Windows. It is recommended to use ProactorEventLoop
# while developing on Windows.
# Link to Github issue:
# https://github.com/GoogleCloudPlatform/cloud-sql-python-connector/issues/22
_loop: asyncio.AbstractEventLoop
_enable_iam_auth: bool
__client_session: Optional[aiohttp.ClientSession] = None
@property
def _client_session(self) -> aiohttp.ClientSession:
if self.__client_session is None:
headers = {
"x-goog-api-client": self._user_agent_string,
"User-Agent": self._user_agent_string,
"Content-Type": "application/json",
}
if self._quota_project:
headers["x-goog-user-project"] = self._quota_project
self.__client_session = aiohttp.ClientSession(headers=headers)
return self.__client_session
_credentials: Optional[Credentials] = None
_keys: asyncio.Future
_instance_connection_string: str
_user_agent_string: str
_sqladmin_api_endpoint: str
_instance: str
_project: str
_region: str
_refresh_rate_limiter: AsyncRateLimiter
_refresh_in_progress: asyncio.locks.Event
_current: asyncio.Task # task wraps coroutine that returns InstanceMetadata
_next: asyncio.Task # task wraps coroutine that returns another task
def __init__(
self,
instance_connection_string: str,
driver_name: str,
keys: asyncio.Future,
loop: asyncio.AbstractEventLoop,
credentials: Optional[Credentials] = None,
enable_iam_auth: bool = False,
quota_project: str = None,
sqladmin_api_endpoint: str = "https://sqladmin.googleapis.com",
) -> None:
# Validate connection string
connection_string_split = instance_connection_string.split(":")
if len(connection_string_split) == 3:
self._instance_connection_string = instance_connection_string
self._project = connection_string_split[0]
self._region = connection_string_split[1]
self._instance = connection_string_split[2]
else:
raise ValueError(
"Arg `instance_connection_string` must have "
"format: PROJECT:REGION:INSTANCE, "
f"got {instance_connection_string}."
)
self._enable_iam_auth = enable_iam_auth
self._user_agent_string = f"{APPLICATION_NAME}/{version}+{driver_name}"
self._quota_project = quota_project
self._sqladmin_api_endpoint = sqladmin_api_endpoint
self._loop = loop
self._keys = keys
# validate credentials type
if not isinstance(credentials, Credentials) and credentials is not None:
raise CredentialsTypeError(
"Arg credentials must be type 'google.auth.credentials.Credentials' "
"or None (to use Application Default Credentials)"
)
self._credentials = _auth_init(credentials)
self._refresh_rate_limiter = AsyncRateLimiter(
max_capacity=2, rate=1 / 30, loop=self._loop
)
self._refresh_in_progress = asyncio.locks.Event()
self._current = self._schedule_refresh(0)
self._next = self._current
async def force_refresh(self) -> None:
"""
Forces a new refresh attempt immediately to be used for future connection attempts.
"""
# if next refresh is not already in progress, cancel it and schedule new one immediately
if not self._refresh_in_progress.is_set():
self._next.cancel()
self._next = self._schedule_refresh(0)
# block all sequential connection attempts on the next refresh result if current is invalid
if not await _is_valid(self._current):
self._current = self._next
async def _perform_refresh(self) -> InstanceMetadata:
"""Retrieves instance metadata and ephemeral certificate from the
Cloud SQL Instance.
:rtype: InstanceMetadata
:returns: A dataclass containing a string representing the ephemeral certificate, a dict
containing the instances IP adresses, a string representing a PEM-encoded private key
and a string representing a PEM-encoded certificate authority.
"""
self._refresh_in_progress.set()
logger.debug(
f"['{self._instance_connection_string}']: Entered _perform_refresh"
)
try:
await self._refresh_rate_limiter.acquire()
priv_key, pub_key = await self._keys
logger.debug(f"['{self._instance_connection_string}']: Creating context")
metadata_task = self._loop.create_task(
_get_metadata(
self._client_session,
self._sqladmin_api_endpoint,
self._credentials,
self._project,
self._region,
self._instance,
)
)
ephemeral_task = self._loop.create_task(
_get_ephemeral(
self._client_session,
self._sqladmin_api_endpoint,
self._credentials,
self._project,
self._instance,
pub_key,
self._enable_iam_auth,
)
)
try:
metadata = await metadata_task
# check if automatic IAM database authn is supported for database engine
if self._enable_iam_auth and not metadata[
"database_version"
].startswith(("POSTGRES", "MYSQL")):
raise AutoIAMAuthNotSupported(
f"'{metadata['database_version']}' does not support automatic IAM authentication. It is only supported with Cloud SQL Postgres or MySQL instances."
)
except Exception:
# cancel ephemeral cert task if exception occurs before it is awaited
ephemeral_task.cancel()
raise
ephemeral_cert = await ephemeral_task
x509 = load_pem_x509_certificate(
ephemeral_cert.encode("UTF-8"), default_backend()
)
expiration = x509.not_valid_after
if self._enable_iam_auth:
if self._credentials is not None:
token_expiration: datetime.datetime = self._credentials.expiry
if expiration > token_expiration:
expiration = token_expiration
except aiohttp.ClientResponseError as e:
logger.debug(
f"['{self._instance_connection_string}']: Error occurred during _perform_refresh."
)
if e.status == 403:
e.message = "Forbidden: Authenticated IAM principal does not seeem authorized to make API request. Verify 'Cloud SQL Admin API' is enabled within your GCP project and 'Cloud SQL Client' role has been granted to IAM principal."
raise
except Exception:
logger.debug(
f"['{self._instance_connection_string}']: Error occurred during _perform_refresh."
)
raise
finally:
self._refresh_in_progress.clear()
return InstanceMetadata(
ephemeral_cert,
metadata["database_version"],
metadata["ip_addresses"],
priv_key,
metadata["server_ca_cert"],
expiration,
self._enable_iam_auth,
)
def _schedule_refresh(self, delay: int) -> asyncio.Task:
"""
Schedule task to sleep and then perform refresh to get InstanceMetadata.
:type delay: int
:param delay
Time in seconds to sleep before running _perform_refresh.
:rtype: asyncio.Task
:returns: A Task representing the scheduled _perform_refresh.
"""
async def _refresh_task(self: Instance, delay: int) -> InstanceMetadata:
"""
A coroutine that sleeps for the specified amount of time before
running _perform_refresh.
"""
refresh_task: asyncio.Task
try:
logger.debug(f"['{self._instance_connection_string}']: Entering sleep")
if delay > 0:
| await asyncio.sleep(delay) | conditional_block |
|
instance.py | str,
expiration: datetime.datetime,
enable_iam_auth: bool, | # update ssl.PROTOCOL_TLS_CLIENT default
self.context.check_hostname = False
# verify OpenSSL version supports TLSv1.3
if ssl.HAS_TLSv1_3:
# force TLSv1.3 if supported by client
self.context.minimum_version = ssl.TLSVersion.TLSv1_3
# fallback to TLSv1.2 for older versions of OpenSSL
else:
if enable_iam_auth:
raise TLSVersionError(
f"Your current version of OpenSSL ({ssl.OPENSSL_VERSION}) does not "
"support TLSv1.3, which is required to use IAM Authentication.\n"
"Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support."
)
logger.warning(
"TLSv1.3 is not supported with your version of OpenSSL "
f"({ssl.OPENSSL_VERSION}), falling back to TLSv1.2\n"
"Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support."
)
self.context.minimum_version = ssl.TLSVersion.TLSv1_2
self.expiration = expiration
# tmpdir and its contents are automatically deleted after the CA cert
# and ephemeral cert are loaded into the SSLcontext. The values
# need to be written to files in order to be loaded by the SSLContext
with TemporaryDirectory() as tmpdir:
ca_filename, cert_filename, key_filename = write_to_file(
tmpdir, server_ca_cert, ephemeral_cert, private_key
)
self.context.load_cert_chain(cert_filename, keyfile=key_filename)
self.context.load_verify_locations(cafile=ca_filename)
def get_preferred_ip(self, ip_type: IPTypes) -> str:
"""Returns the first IP address for the instance, according to the preference
supplied by ip_type. If no IP addressess with the given preference are found,
an error is raised."""
if ip_type.value in self.ip_addrs:
return self.ip_addrs[ip_type.value]
raise CloudSQLIPTypeError(
"Cloud SQL instance does not have any IP addresses matching "
f"preference: {ip_type.value})"
)
class Instance:
"""A class to manage the details of the connection to a Cloud SQL
instance, including refreshing the credentials.
:param instance_connection_string:
The Google Cloud SQL Instance's connection
string.
:type instance_connection_string: str
:param user_agent_string:
The user agent string to append to SQLAdmin API requests
:type user_agent_string: str
:type credentials: google.auth.credentials.Credentials
:param credentials
Credentials object used to authenticate connections to Cloud SQL server.
If not specified, Application Default Credentials are used.
:param enable_iam_auth
Enables automatic IAM database authentication for Postgres or MySQL
instances.
:type enable_iam_auth: bool
:param loop:
A new event loop for the refresh function to run in.
:type loop: asyncio.AbstractEventLoop
:type quota_project: str
:param quota_project
The Project ID for an existing Google Cloud project. The project specified
is used for quota and billing purposes. If not specified, defaults to
project sourced from environment.
:type sqladmin_api_endpoint: str
:param sqladmin_api_endpoint:
Base URL to use when calling the Cloud SQL Admin API endpoint.
Defaults to "https://sqladmin.googleapis.com", this argument should
only be used in development.
"""
# asyncio.AbstractEventLoop is used because the default loop,
# SelectorEventLoop, is usable on both Unix and Windows but has limited
# functionality on Windows. It is recommended to use ProactorEventLoop
# while developing on Windows.
# Link to Github issue:
# https://github.com/GoogleCloudPlatform/cloud-sql-python-connector/issues/22
_loop: asyncio.AbstractEventLoop
_enable_iam_auth: bool
__client_session: Optional[aiohttp.ClientSession] = None
@property
def _client_session(self) -> aiohttp.ClientSession:
if self.__client_session is None:
headers = {
"x-goog-api-client": self._user_agent_string,
"User-Agent": self._user_agent_string,
"Content-Type": "application/json",
}
if self._quota_project:
headers["x-goog-user-project"] = self._quota_project
self.__client_session = aiohttp.ClientSession(headers=headers)
return self.__client_session
_credentials: Optional[Credentials] = None
_keys: asyncio.Future
_instance_connection_string: str
_user_agent_string: str
_sqladmin_api_endpoint: str
_instance: str
_project: str
_region: str
_refresh_rate_limiter: AsyncRateLimiter
_refresh_in_progress: asyncio.locks.Event
_current: asyncio.Task # task wraps coroutine that returns InstanceMetadata
_next: asyncio.Task # task wraps coroutine that returns another task
def __init__(
self,
instance_connection_string: str,
driver_name: str,
keys: asyncio.Future,
loop: asyncio.AbstractEventLoop,
credentials: Optional[Credentials] = None,
enable_iam_auth: bool = False,
quota_project: str = None,
sqladmin_api_endpoint: str = "https://sqladmin.googleapis.com",
) -> None:
# Validate connection string
connection_string_split = instance_connection_string.split(":")
if len(connection_string_split) == 3:
self._instance_connection_string = instance_connection_string
self._project = connection_string_split[0]
self._region = connection_string_split[1]
self._instance = connection_string_split[2]
else:
raise ValueError(
"Arg `instance_connection_string` must have "
"format: PROJECT:REGION:INSTANCE, "
f"got {instance_connection_string}."
)
self._enable_iam_auth = enable_iam_auth
self._user_agent_string = f"{APPLICATION_NAME}/{version}+{driver_name}"
self._quota_project = quota_project
self._sqladmin_api_endpoint = sqladmin_api_endpoint
self._loop = loop
self._keys = keys
# validate credentials type
if not isinstance(credentials, Credentials) and credentials is not None:
raise CredentialsTypeError(
"Arg credentials must be type 'google.auth.credentials.Credentials' "
"or None (to use Application Default Credentials)"
)
self._credentials = _auth_init(credentials)
self._refresh_rate_limiter = AsyncRateLimiter(
max_capacity=2, rate=1 / 30, loop=self._loop
)
self._refresh_in_progress = asyncio.locks.Event()
self._current = self._schedule_refresh(0)
self._next = self._current
async def force_refresh(self) -> None:
"""
Forces a new refresh attempt immediately to be used for future connection attempts.
"""
# if next refresh is not already in progress, cancel it and schedule new one immediately
if not self._refresh_in_progress.is_set():
self._next.cancel()
self._next = self._schedule_refresh(0)
# block all sequential connection attempts on the next refresh result if current is invalid
if not await _is_valid(self._current):
self._current = self._next
async def _perform_refresh(self) -> InstanceMetadata:
"""Retrieves instance metadata and ephemeral certificate from the
Cloud SQL Instance.
:rtype: InstanceMetadata
:returns: A dataclass containing a string representing the ephemeral certificate, a dict
containing the instances IP adresses, a string representing a PEM-encoded private key
and a string representing a PEM-encoded certificate authority.
"""
self._refresh_in_progress.set()
logger.debug(
f"['{self._instance_connection_string}']: Entered _perform_refresh"
)
try:
await self._refresh_rate_limiter.acquire()
priv_key, pub_key = await self._keys
logger.debug(f"['{self._instance_connection_string}']: Creating context")
metadata_task = self._loop.create_task(
_get_metadata(
self._client_session,
self._sqladmin_api_endpoint,
self._credentials,
self._project,
self._region,
self._instance,
)
)
ephemeral_task = self._loop.create_task(
_get_ephemeral(
self._client_session,
self._sqladmin_api_endpoint,
self._credentials,
self._project,
self._instance,
pub_key,
self._enable_iam_auth,
)
)
try:
metadata = await metadata_task
# check if automatic IAM database authn is supported for database engine
if self._enable_iam_auth and not metadata[
"database_version"
].startswith(("POSTGRES", "MYSQL")):
raise AutoIAMAuthNotSupported(
f"'{metadata['database_version']}' does not support automatic IAM authentication. It is only supported with Cloud SQL Postgres or MySQL instances."
)
except Exception:
# cancel ephemeral cert task if exception occurs | ) -> None:
self.ip_addrs = ip_addrs
self.database_version = database_version
self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
| random_line_split |
instance.py |
class InstanceMetadata:
ip_addrs: Dict[str, Any]
context: ssl.SSLContext
database_version: str
expiration: datetime.datetime
def __init__(
self,
ephemeral_cert: str,
database_version: str,
ip_addrs: Dict[str, Any],
private_key: bytes,
server_ca_cert: str,
expiration: datetime.datetime,
enable_iam_auth: bool,
) -> None:
self.ip_addrs = ip_addrs
self.database_version = database_version
self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# update ssl.PROTOCOL_TLS_CLIENT default
self.context.check_hostname = False
# verify OpenSSL version supports TLSv1.3
if ssl.HAS_TLSv1_3:
# force TLSv1.3 if supported by client
self.context.minimum_version = ssl.TLSVersion.TLSv1_3
# fallback to TLSv1.2 for older versions of OpenSSL
else:
if enable_iam_auth:
raise TLSVersionError(
f"Your current version of OpenSSL ({ssl.OPENSSL_VERSION}) does not "
"support TLSv1.3, which is required to use IAM Authentication.\n"
"Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support."
)
logger.warning(
"TLSv1.3 is not supported with your version of OpenSSL "
f"({ssl.OPENSSL_VERSION}), falling back to TLSv1.2\n"
"Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support."
)
self.context.minimum_version = ssl.TLSVersion.TLSv1_2
self.expiration = expiration
# tmpdir and its contents are automatically deleted after the CA cert
# and ephemeral cert are loaded into the SSLcontext. The values
# need to be written to files in order to be loaded by the SSLContext
with TemporaryDirectory() as tmpdir:
ca_filename, cert_filename, key_filename = write_to_file(
tmpdir, server_ca_cert, ephemeral_cert, private_key
)
self.context.load_cert_chain(cert_filename, keyfile=key_filename)
self.context.load_verify_locations(cafile=ca_filename)
def get_preferred_ip(self, ip_type: IPTypes) -> str:
"""Returns the first IP address for the instance, according to the preference
supplied by ip_type. If no IP addressess with the given preference are found,
an error is raised."""
if ip_type.value in self.ip_addrs:
return self.ip_addrs[ip_type.value]
raise CloudSQLIPTypeError(
"Cloud SQL instance does not have any IP addresses matching "
f"preference: {ip_type.value})"
)
class Instance:
"""A class to manage the details of the connection to a Cloud SQL
instance, including refreshing the credentials.
:param instance_connection_string:
The Google Cloud SQL Instance's connection
string.
:type instance_connection_string: str
:param user_agent_string:
The user agent string to append to SQLAdmin API requests
:type user_agent_string: str
:type credentials: google.auth.credentials.Credentials
:param credentials
Credentials object used to authenticate connections to Cloud SQL server.
If not specified, Application Default Credentials are used.
:param enable_iam_auth
Enables automatic IAM database authentication for Postgres or MySQL
instances.
:type enable_iam_auth: bool
:param loop:
A new event loop for the refresh function to run in.
:type loop: asyncio.AbstractEventLoop
:type quota_project: str
:param quota_project
The Project ID for an existing Google Cloud project. The project specified
is used for quota and billing purposes. If not specified, defaults to
project sourced from environment.
:type sqladmin_api_endpoint: str
:param sqladmin_api_endpoint:
Base URL to use when calling the Cloud SQL Admin API endpoint.
Defaults to "https://sqladmin.googleapis.com", this argument should
only be used in development.
"""
# asyncio.AbstractEventLoop is used because the default loop,
# SelectorEventLoop, is usable on both Unix and Windows but has limited
# functionality on Windows. It is recommended to use ProactorEventLoop
# while developing on Windows.
# Link to Github issue:
# https://github.com/GoogleCloudPlatform/cloud-sql-python-connector/issues/22
_loop: asyncio.AbstractEventLoop
_enable_iam_auth: bool
__client_session: Optional[aiohttp.ClientSession] = None
@property
def _client_session(self) -> aiohttp.ClientSession:
if self.__client_session is None:
headers = {
"x-goog-api-client": self._user_agent_string,
"User-Agent": self._user_agent_string,
"Content-Type": "application/json",
}
if self._quota_project:
headers["x-goog-user-project"] = self._quota_project
self.__client_session = aiohttp.ClientSession(headers=headers)
return self.__client_session
_credentials: Optional[Credentials] = None
_keys: asyncio.Future
_instance_connection_string: str
_user_agent_string: str
_sqladmin_api_endpoint: str
_instance: str
_project: str
_region: str
_refresh_rate_limiter: AsyncRateLimiter
_refresh_in_progress: asyncio.locks.Event
_current: asyncio.Task # task wraps coroutine that returns InstanceMetadata
_next: asyncio.Task # task wraps coroutine that returns another task
def __init__(
self,
instance_connection_string: str,
driver_name: str,
keys: asyncio.Future,
loop: asyncio.AbstractEventLoop,
credentials: Optional[Credentials] = None,
enable_iam_auth: bool = False,
quota_project: str = None,
sqladmin_api_endpoint: str = "https://sqladmin.googleapis.com",
) -> None:
# Validate connection string
connection_string_split = instance_connection_string.split(":")
if len(connection_string_split) == 3:
self._instance_connection_string = instance_connection_string
self._project = connection_string_split[0]
self._region = connection_string_split[1]
self._instance = connection_string_split[2]
else:
raise ValueError(
"Arg `instance_connection_string` must have "
"format: PROJECT:REGION:INSTANCE, "
f"got {instance_connection_string}."
)
self._enable_iam_auth = enable_iam_auth
self._user_agent_string = f"{APPLICATION_NAME}/{version}+{driver_name}"
self._quota_project = quota_project
self._sqladmin_api_endpoint = sqladmin_api_endpoint
self._loop = loop
self._keys = keys
# validate credentials type
if not isinstance(credentials, Credentials) and credentials is not None:
raise CredentialsTypeError(
"Arg credentials must be type 'google.auth.credentials.Credentials' "
"or None (to use Application Default Credentials)"
)
self._credentials = _auth_init(credentials)
self._refresh_rate_limiter = AsyncRateLimiter(
max_capacity=2, rate=1 / 30, loop=self._loop
)
self._refresh_in_progress = asyncio.locks.Event()
self._current = self._schedule_refresh(0)
self._next = self._current
async def force_refresh(self) -> None:
"""
Forces a new refresh attempt immediately to be used for future connection attempts.
"""
# if next refresh is not already in progress, cancel it and schedule new one immediately
if not self._refresh_in_progress.is_set():
self._next.cancel()
self._next = self._schedule_refresh(0)
# block all sequential connection attempts on the next refresh result if current is invalid
if not await _is_valid(self._current):
self._current = self._next
async def _perform_refresh(self) -> InstanceMetadata:
"""Retrieves instance metadata and ephemeral certificate from the
Cloud SQL Instance.
:rtype: InstanceMetadata
:returns: A dataclass containing a string representing the ephemeral certificate, a dict
containing the instances IP adresses, a string representing a PEM-encoded private key
and a string representing a PEM-encoded certificate authority.
"""
self._refresh_in_progress.set()
logger.debug(
f"['{self._instance_connection_string}']: Entered _perform_refresh"
)
try:
await self._refresh_rate_limiter.acquire()
priv_key, pub_key = await self._keys
logger.debug(f"['{self._instance_connection_string}']: Creating context")
metadata_task = self._loop.create_task(
_get_metadata(
self._client_session,
self._sqladmin_api_endpoint,
self._credentials,
self._project,
self._region,
self._instance,
)
)
ephemeral_task = self._loop.create_task(
_get_ephemeral(
self._client_session,
self._sqladmin_api_endpoint,
self._credentials,
self._project,
self._instance,
pub_key,
self._enable_iam_auth,
)
)
try:
| PUBLIC: str = "PRIMARY"
PRIVATE: str = "PRIVATE"
PSC: str = "PSC" | identifier_body |
|
statutes_parse.py | ):
"""
Exception is raised if a unit in a refren cannot be parsed.
"""
pass
class StatutesParser(StatutesProcessor):
"""
Class to parse the content of a reference area identified by StatutesExtractor
"""
def parse_main(self, main_text: str) -> list:
"""
Parses a string containing a reference to a specific section within a given law.
E.g. "§ 123 Abs. 4 Satz 5 und 6".
The parsed informtaion is formatted into lists nested in lists nested in lists.
The outer list is a list of references.
References are lists of path components. A path component is e.g. "Abs. 4".
A path component is represented by a list with two elements: The first
contains the unit the second the value.
The example above would be represented as
`[[['§', '123'], ['Abs', '4'], ['Satz', '5']],
[['§', '123'], ['Abs', '4'], ['Satz', '6']]]`.
Args:
main_text: string to parse
Returns: The parsed reference.
"""
citation = self.fix_errors_in_citation(main_text.strip())
enum_parts = self.split_citation_into_enum_parts(citation)
reference_paths = []
for enum_part in enum_parts:
for string in enum_part:
splitted_citation_part_list = list(self.split_citation_part(string))
if len(splitted_citation_part_list):
reference_paths.append(splitted_citation_part_list)
else:
print(f"Empty citation part in {citation} in part {string}")
reference_paths = self.split_parts_accidently_joined(reference_paths)
for reference_path in reference_paths[1:]:
prev_reference_path = reference_paths[
reference_paths.index(reference_path) - 1
]
self.infer_units(reference_path, prev_reference_path)
return reference_paths
def parse_law(self, law_text: str, match_type: str, current_lawid: str = None):
"""
Parses the law information from a references found by StatutesMatchWithMainArea
Args:
main_text: E.g. "§ 123 Abs. 4 und 5 Nr. 6"
law_text: E.g. "BGB"
match_type: E.g. "dict"
Returns: The key of a parse law.
"""
if match_type == "dict":
lawname_stem = stem_law_name(law_text)
match = self.match_law_name(lawname_stem)
return self.laws_lookup[match]
elif match_type == "sgb":
lawid = sgb_dict[stem_law_name(law_text)]
if type(lawid) is tuple:
asse | else:
return lawid
elif match_type == "internal":
if current_lawid is None:
raise Exception("Current law id must be set for internal reference")
return current_lawid
else:
return None # match_type: ignore or unknown
@staticmethod
def stem_unit(unit: str):
"""
Brings a unit into a standard format. E.g. removes abbreviations, grammatical
differences spelling errors, etc.
Args:
unit: A string containing a unit that should be converted into a standard
format.
Returns: Unit in a standard format as string. E.g. §, Art, Nr, Halbsatz,
Anhang, ...
"""
for unit_pattern in unit_patterns:
if regex.fullmatch(unit_pattern, unit):
return unit_patterns[unit_pattern]
raise NoUnitMatched(unit)
@staticmethod
def is_unit(token: str):
"""
Returns: True if the token is a unit
"""
return regex.fullmatch("|".join(unit_patterns.keys()), token)
@staticmethod
def is_pre_numb(token: str):
"""
Returns: True if the token is a number that comes *before* the unit.
E.g. '*erster* Halbsatz'
"""
return pre_numb_pattern.fullmatch(
token,
)
@staticmethod
def is_numb(token: str):
"""
Returns: True if the token is a 'numeric' value of the reference.
"""
return numb_pattern.fullmatch(
token,
)
@staticmethod
def fix_errors_in_citation(citation):
"""
Fix some common inconsistencies in the references such as double spaces.
"""
result = regex.sub(r"\s+", " ", citation)
result = regex.sub(r"§(?=\d)", "§ ", result)
result = regex.sub(r",\sbis\s", " bis ", result)
return result
@staticmethod
def split_citation_into_enum_parts(citation):
"""
A citation can contain references to multiple parts of the law.
E.g. '§§ 20 und 35' or 'Art. 3 Abs. 1 Satz 1, Abs. 3 Satz 1'.
The citation is split into parts so that each referenced section of the law is
separated. E.g. '§§ 20' and '35' resp. 'Art. 3 Abs. 1 Satz 1' and
'Abs. 3 Satz 1'.
However, ranges are not spit: E.g. "§§ 1 bis 10" will not be split.
"""
enum_parts = split_citation_into_parts_pattern.split(
citation,
)
# Split range
enum_parts = [
split_citation_into_range_parts_pattern.split(part) for part in enum_parts
]
return enum_parts
@staticmethod
def split_parts_accidently_joined(reference_paths):
"""
Reformats the parsed references to separate accitently joined references.
E.g. the original referehence "§ 123 § 126" will not be split by
split_citation_into_enum_parts because the separation is falsly not indicated by
a ',', 'or' etc. It come from the unit '§' that it can be inferred that the
citation contains references to two parts of statutes.
This function accounts for the case that the unit '§' or 'Art' appears twice in
the same reference path and split the path into several elements.
"""
new_reference_paths = []
main_unit = (
"Art"
if Counter([part[0] for part in itertools.chain(*reference_paths)]).get(
"Art"
)
else "§"
)
for reference_path in reference_paths:
temp_path = []
for part in reference_path:
if part[0] == main_unit:
if len(temp_path):
new_reference_paths.append(temp_path)
temp_path = []
temp_path.append(part)
new_reference_paths.append(temp_path)
return new_reference_paths
@staticmethod
def infer_units(reference_path, prev_reference_path):
"""
In some cases of an enumeration a numeric value is not directed prefixed by
the corresponding unit. E.g. "§ 123 Abs. 1 S. 2, 3 S. 4". In this case "3"
is not prefixed with its unit. Instead it can be inferred by looking at the
whole citation that it is next higher unit of "S.", hence "Abs.". These
inferred units are added to parsed data.
"""
prev_path_units = [o[0] for o in prev_reference_path]
if reference_path[0][0]:
pass
elif len(reference_path) > 1:
try:
prev_unit_index = prev_path_units.index(reference_path[1][0])
# if not prev_unit_index > 0:
# print(f'Infer unit error: {citation}')
reference_path[0][0] = prev_path_units[prev_unit_index - 1]
except ValueError:
reference_path[0][0] = prev_path_units[-1]
else:
reference_path[0][0] = prev_path_units[-1]
try:
prev_unit_index = prev_path_units.index(reference_path[0][0])
reference_path[0:0] = prev_reference_path[:prev_unit_index]
except Exception:
reference_path[0:0] = prev_reference_path
@staticmethod
def split_citation_part(string: str):
"""
A string a tokenizes. Tokens are identified as units or values. Pairs are
built to connect the units with their respective values. If the unit cannot
be indentified (and must be inferred later) None is returned.
Args:
string: A string that is part of a reference and cites *one* part a statute.
Retruns: As a generator tuples are returned, each containing the unit (or None)
and the respecive value.
"""
# Tokenization
# fmt: off
string = regex.sub(
r"("
r"\d+(?>\.\d+)?[a-z]?|"
r"\b[ivx]+|"
r"\ | rt len(lawid) == 2
if lawid[0] in self.laws_lookup.values():
return lawid[0]
elif lawid[1] in self.laws_lookup.values():
return lawid[1]
else:
return lawid[1]
| conditional_block |
statutes_parse.py | (Exception):
"""
Exception is raised if a unit in a refren cannot be parsed.
"""
pass
class StatutesParser(StatutesProcessor):
"""
Class to parse the content of a reference area identified by StatutesExtractor
"""
| def parse_main(self, main_text: str) -> list:
"""
Parses a string containing a reference to a specific section within a given law.
E.g. "§ 123 Abs. 4 Satz 5 und 6".
The parsed informtaion is formatted into lists nested in lists nested in lists.
The outer list is a list of references.
References are lists of path components. A path component is e.g. "Abs. 4".
A path component is represented by a list with two elements: The first
contains the unit the second the value.
The example above would be represented as
`[[['§', '123'], ['Abs', '4'], ['Satz', '5']],
[['§', '123'], ['Abs', '4'], ['Satz', '6']]]`.
Args:
main_text: string to parse
Returns: The parsed reference.
"""
citation = self.fix_errors_in_citation(main_text.strip())
enum_parts = self.split_citation_into_enum_parts(citation)
reference_paths = []
for enum_part in enum_parts:
for string in enum_part:
splitted_citation_part_list = list(self.split_citation_part(string))
if len(splitted_citation_part_list):
reference_paths.append(splitted_citation_part_list)
else:
print(f"Empty citation part in {citation} in part {string}")
reference_paths = self.split_parts_accidently_joined(reference_paths)
for reference_path in reference_paths[1:]:
prev_reference_path = reference_paths[
reference_paths.index(reference_path) - 1
]
self.infer_units(reference_path, prev_reference_path)
return reference_paths
def parse_law(self, law_text: str, match_type: str, current_lawid: str = None):
"""
Parses the law information from a references found by StatutesMatchWithMainArea
Args:
main_text: E.g. "§ 123 Abs. 4 und 5 Nr. 6"
law_text: E.g. "BGB"
match_type: E.g. "dict"
Returns: The key of a parse law.
"""
if match_type == "dict":
lawname_stem = stem_law_name(law_text)
match = self.match_law_name(lawname_stem)
return self.laws_lookup[match]
elif match_type == "sgb":
lawid = sgb_dict[stem_law_name(law_text)]
if type(lawid) is tuple:
assert len(lawid) == 2
if lawid[0] in self.laws_lookup.values():
return lawid[0]
elif lawid[1] in self.laws_lookup.values():
return lawid[1]
else:
return lawid[1]
else:
return lawid
elif match_type == "internal":
if current_lawid is None:
raise Exception("Current law id must be set for internal reference")
return current_lawid
else:
return None # match_type: ignore or unknown
@staticmethod
def stem_unit(unit: str):
"""
Brings a unit into a standard format. E.g. removes abbreviations, grammatical
differences spelling errors, etc.
Args:
unit: A string containing a unit that should be converted into a standard
format.
Returns: Unit in a standard format as string. E.g. §, Art, Nr, Halbsatz,
Anhang, ...
"""
for unit_pattern in unit_patterns:
if regex.fullmatch(unit_pattern, unit):
return unit_patterns[unit_pattern]
raise NoUnitMatched(unit)
@staticmethod
def is_unit(token: str):
"""
Returns: True if the token is a unit
"""
return regex.fullmatch("|".join(unit_patterns.keys()), token)
@staticmethod
def is_pre_numb(token: str):
"""
Returns: True if the token is a number that comes *before* the unit.
E.g. '*erster* Halbsatz'
"""
return pre_numb_pattern.fullmatch(
token,
)
@staticmethod
def is_numb(token: str):
"""
Returns: True if the token is a 'numeric' value of the reference.
"""
return numb_pattern.fullmatch(
token,
)
@staticmethod
def fix_errors_in_citation(citation):
"""
Fix some common inconsistencies in the references such as double spaces.
"""
result = regex.sub(r"\s+", " ", citation)
result = regex.sub(r"§(?=\d)", "§ ", result)
result = regex.sub(r",\sbis\s", " bis ", result)
return result
@staticmethod
def split_citation_into_enum_parts(citation):
"""
A citation can contain references to multiple parts of the law.
E.g. '§§ 20 und 35' or 'Art. 3 Abs. 1 Satz 1, Abs. 3 Satz 1'.
The citation is split into parts so that each referenced section of the law is
separated. E.g. '§§ 20' and '35' resp. 'Art. 3 Abs. 1 Satz 1' and
'Abs. 3 Satz 1'.
However, ranges are not spit: E.g. "§§ 1 bis 10" will not be split.
"""
enum_parts = split_citation_into_parts_pattern.split(
citation,
)
# Split range
enum_parts = [
split_citation_into_range_parts_pattern.split(part) for part in enum_parts
]
return enum_parts
@staticmethod
def split_parts_accidently_joined(reference_paths):
"""
Reformats the parsed references to separate accitently joined references.
E.g. the original referehence "§ 123 § 126" will not be split by
split_citation_into_enum_parts because the separation is falsly not indicated by
a ',', 'or' etc. It come from the unit '§' that it can be inferred that the
citation contains references to two parts of statutes.
This function accounts for the case that the unit '§' or 'Art' appears twice in
the same reference path and split the path into several elements.
"""
new_reference_paths = []
main_unit = (
"Art"
if Counter([part[0] for part in itertools.chain(*reference_paths)]).get(
"Art"
)
else "§"
)
for reference_path in reference_paths:
temp_path = []
for part in reference_path:
if part[0] == main_unit:
if len(temp_path):
new_reference_paths.append(temp_path)
temp_path = []
temp_path.append(part)
new_reference_paths.append(temp_path)
return new_reference_paths
@staticmethod
def infer_units(reference_path, prev_reference_path):
"""
In some cases of an enumeration a numeric value is not directed prefixed by
the corresponding unit. E.g. "§ 123 Abs. 1 S. 2, 3 S. 4". In this case "3"
is not prefixed with its unit. Instead it can be inferred by looking at the
whole citation that it is next higher unit of "S.", hence "Abs.". These
inferred units are added to parsed data.
"""
prev_path_units = [o[0] for o in prev_reference_path]
if reference_path[0][0]:
pass
elif len(reference_path) > 1:
try:
prev_unit_index = prev_path_units.index(reference_path[1][0])
# if not prev_unit_index > 0:
# print(f'Infer unit error: {citation}')
reference_path[0][0] = prev_path_units[prev_unit_index - 1]
except ValueError:
reference_path[0][0] = prev_path_units[-1]
else:
reference_path[0][0] = prev_path_units[-1]
try:
prev_unit_index = prev_path_units.index(reference_path[0][0])
reference_path[0:0] = prev_reference_path[:prev_unit_index]
except Exception:
reference_path[0:0] = prev_reference_path
@staticmethod
def split_citation_part(string: str):
"""
A string a tokenizes. Tokens are identified as units or values. Pairs are
built to connect the units with their respective values. If the unit cannot
be indentified (and must be inferred later) None is returned.
Args:
string: A string that is part of a reference and cites *one* part a statute.
Retruns: As a generator tuples are returned, each containing the unit (or None)
and the respecive value.
"""
# Tokenization
# fmt: off
string = regex.sub(
r"("
r"\d+(?>\.\d+)?[a-z]?|"
r"\b[ivx]+|"
r"\b[a | random_line_split |
|
statutes_parse.py | ):
"""
Exception is raised if a unit in a refren cannot be parsed.
"""
pass
class StatutesParser(StatutesProcessor):
"""
Class to parse the content of a reference area identified by StatutesExtractor
"""
def parse_main(self, main_text: str) -> list:
"""
Parses a string containing a reference to a specific section within a given law.
E.g. "§ 123 Abs. 4 Satz 5 und 6".
The parsed informtaion is formatted into lists nested in lists nested in lists.
The outer list is a list of references.
References are lists of path components. A path component is e.g. "Abs. 4".
A path component is represented by a list with two elements: The first
contains the unit the second the value.
The example above would be represented as
`[[['§', '123'], ['Abs', '4'], ['Satz', '5']],
[['§', '123'], ['Abs', '4'], ['Satz', '6']]]`.
Args:
main_text: string to parse
Returns: The parsed reference.
"""
citation = self.fix_errors_in_citation(main_text.strip())
enum_parts = self.split_citation_into_enum_parts(citation)
reference_paths = []
for enum_part in enum_parts:
for string in enum_part:
splitted_citation_part_list = list(self.split_citation_part(string))
if len(splitted_citation_part_list):
reference_paths.append(splitted_citation_part_list)
else:
print(f"Empty citation part in {citation} in part {string}")
reference_paths = self.split_parts_accidently_joined(reference_paths)
for reference_path in reference_paths[1:]:
prev_reference_path = reference_paths[
reference_paths.index(reference_path) - 1
]
self.infer_units(reference_path, prev_reference_path)
return reference_paths
def parse_law(self, law_text: str, match_type: str, current_lawid: str = None):
"""
Parses the law information from a references found by StatutesMatchWithMainArea
Args:
main_text: E.g. "§ 123 Abs. 4 und 5 Nr. 6"
law_text: E.g. "BGB"
match_type: E.g. "dict"
Returns: The key of a parse law.
"""
if match_type == "dict":
lawname_stem = stem_law_name(law_text)
match = self.match_law_name(lawname_stem)
return self.laws_lookup[match]
elif match_type == "sgb":
lawid = sgb_dict[stem_law_name(law_text)]
if type(lawid) is tuple:
assert len(lawid) == 2
if lawid[0] in self.laws_lookup.values():
return lawid[0]
elif lawid[1] in self.laws_lookup.values():
return lawid[1]
else:
return lawid[1]
else:
return lawid
elif match_type == "internal":
if current_lawid is None:
raise Exception("Current law id must be set for internal reference")
return current_lawid
else:
return None # match_type: ignore or unknown
@staticmethod
def stem_unit(unit: str):
"""
Brings a unit into a standard format. E.g. removes abbreviations, grammatical
differences spelling errors, etc.
Args:
unit: A string containing a unit that should be converted into a standard
format.
Returns: Unit in a standard format as string. E.g. §, Art, Nr, Halbsatz,
Anhang, ...
"""
for unit_pattern in unit_patterns:
if regex.fullmatch(unit_pattern, unit):
return unit_patterns[unit_pattern]
raise NoUnitMatched(unit)
@staticmethod
def is_unit(token: str):
"""
Returns: True if the token is a unit
"""
return regex.fullmatch("|".join(unit_patterns.keys()), token)
@staticmethod
def is_pre_numb(token: str):
"""
Returns: True if the token is a number that comes *before* the unit.
E.g. '*erster* Halbsatz'
"""
return pre_numb_pattern.fullmatch(
token,
)
@staticmethod
def is_nu | n: str):
"""
Returns: True if the token is a 'numeric' value of the reference.
"""
return numb_pattern.fullmatch(
token,
)
@staticmethod
def fix_errors_in_citation(citation):
"""
Fix some common inconsistencies in the references such as double spaces.
"""
result = regex.sub(r"\s+", " ", citation)
result = regex.sub(r"§(?=\d)", "§ ", result)
result = regex.sub(r",\sbis\s", " bis ", result)
return result
@staticmethod
def split_citation_into_enum_parts(citation):
"""
A citation can contain references to multiple parts of the law.
E.g. '§§ 20 und 35' or 'Art. 3 Abs. 1 Satz 1, Abs. 3 Satz 1'.
The citation is split into parts so that each referenced section of the law is
separated. E.g. '§§ 20' and '35' resp. 'Art. 3 Abs. 1 Satz 1' and
'Abs. 3 Satz 1'.
However, ranges are not spit: E.g. "§§ 1 bis 10" will not be split.
"""
enum_parts = split_citation_into_parts_pattern.split(
citation,
)
# Split range
enum_parts = [
split_citation_into_range_parts_pattern.split(part) for part in enum_parts
]
return enum_parts
@staticmethod
def split_parts_accidently_joined(reference_paths):
"""
Reformats the parsed references to separate accitently joined references.
E.g. the original referehence "§ 123 § 126" will not be split by
split_citation_into_enum_parts because the separation is falsly not indicated by
a ',', 'or' etc. It come from the unit '§' that it can be inferred that the
citation contains references to two parts of statutes.
This function accounts for the case that the unit '§' or 'Art' appears twice in
the same reference path and split the path into several elements.
"""
new_reference_paths = []
main_unit = (
"Art"
if Counter([part[0] for part in itertools.chain(*reference_paths)]).get(
"Art"
)
else "§"
)
for reference_path in reference_paths:
temp_path = []
for part in reference_path:
if part[0] == main_unit:
if len(temp_path):
new_reference_paths.append(temp_path)
temp_path = []
temp_path.append(part)
new_reference_paths.append(temp_path)
return new_reference_paths
@staticmethod
def infer_units(reference_path, prev_reference_path):
"""
In some cases of an enumeration a numeric value is not directed prefixed by
the corresponding unit. E.g. "§ 123 Abs. 1 S. 2, 3 S. 4". In this case "3"
is not prefixed with its unit. Instead it can be inferred by looking at the
whole citation that it is next higher unit of "S.", hence "Abs.". These
inferred units are added to parsed data.
"""
prev_path_units = [o[0] for o in prev_reference_path]
if reference_path[0][0]:
pass
elif len(reference_path) > 1:
try:
prev_unit_index = prev_path_units.index(reference_path[1][0])
# if not prev_unit_index > 0:
# print(f'Infer unit error: {citation}')
reference_path[0][0] = prev_path_units[prev_unit_index - 1]
except ValueError:
reference_path[0][0] = prev_path_units[-1]
else:
reference_path[0][0] = prev_path_units[-1]
try:
prev_unit_index = prev_path_units.index(reference_path[0][0])
reference_path[0:0] = prev_reference_path[:prev_unit_index]
except Exception:
reference_path[0:0] = prev_reference_path
@staticmethod
def split_citation_part(string: str):
"""
A string a tokenizes. Tokens are identified as units or values. Pairs are
built to connect the units with their respective values. If the unit cannot
be indentified (and must be inferred later) None is returned.
Args:
string: A string that is part of a reference and cites *one* part a statute.
Retruns: As a generator tuples are returned, each containing the unit (or None)
and the respecive value.
"""
# Tokenization
# fmt: off
string = regex.sub(
r"("
r"\d+(?>\.\d+)?[a-z]?|"
r"\b[ivx]+|"
r"\ | mb(toke | identifier_name |
statutes_parse.py | ):
"""
Exception is raised if a unit in a refren cannot be parsed.
"""
pass
class StatutesParser(StatutesProcessor):
"""
Class to parse the content of a reference area identified by StatutesExtractor
"""
def parse_main(self, main_text: str) -> list:
"""
Parses a string containing a reference to a specific section within a given law.
E.g. "§ 123 Abs. 4 Satz 5 und 6".
The parsed informtaion is formatted into lists nested in lists nested in lists.
The outer list is a list of references.
References are lists of path components. A path component is e.g. "Abs. 4".
A path component is represented by a list with two elements: The first
contains the unit the second the value.
The example above would be represented as
`[[['§', '123'], ['Abs', '4'], ['Satz', '5']],
[['§', '123'], ['Abs', '4'], ['Satz', '6']]]`.
Args:
main_text: string to parse
Returns: The parsed reference.
"""
citation = self.fix_errors_in_citation(main_text.strip())
enum_parts = self.split_citation_into_enum_parts(citation)
reference_paths = []
for enum_part in enum_parts:
for string in enum_part:
splitted_citation_part_list = list(self.split_citation_part(string))
if len(splitted_citation_part_list):
reference_paths.append(splitted_citation_part_list)
else:
print(f"Empty citation part in {citation} in part {string}")
reference_paths = self.split_parts_accidently_joined(reference_paths)
for reference_path in reference_paths[1:]:
prev_reference_path = reference_paths[
reference_paths.index(reference_path) - 1
]
self.infer_units(reference_path, prev_reference_path)
return reference_paths
def parse_law(self, law_text: str, match_type: str, current_lawid: str = None):
"""
Parses the law information from a references found by StatutesMatchWithMainArea
Args:
main_text: E.g. "§ 123 Abs. 4 und 5 Nr. 6"
law_text: E.g. "BGB"
match_type: E.g. "dict"
Returns: The key of a parse law.
"""
if match_type == "dict":
lawname_stem = stem_law_name(law_text)
match = self.match_law_name(lawname_stem)
return self.laws_lookup[match]
elif match_type == "sgb":
lawid = sgb_dict[stem_law_name(law_text)]
if type(lawid) is tuple:
assert len(lawid) == 2
if lawid[0] in self.laws_lookup.values():
return lawid[0]
elif lawid[1] in self.laws_lookup.values():
return lawid[1]
else:
return lawid[1]
else:
return lawid
elif match_type == "internal":
if current_lawid is None:
raise Exception("Current law id must be set for internal reference")
return current_lawid
else:
return None # match_type: ignore or unknown
@staticmethod
def stem_unit(unit: str):
"""
Brings a unit into a standard format. E.g. removes abbreviations, grammatical
differences spelling errors, etc.
Args:
unit: A string containing a unit that should be converted into a standard
format.
Returns: Unit in a standard format as string. E.g. §, Art, Nr, Halbsatz,
Anhang, ...
"""
for unit_pattern in unit_patterns:
if regex.fullmatch(unit_pattern, unit):
return unit_patterns[unit_pattern]
raise NoUnitMatched(unit)
@staticmethod
def is_unit(token: str):
"""
Returns: True if the token is a unit
"""
return regex.fullmatch("|".join(unit_patterns.keys()), token)
@staticmethod
def is_pre_numb(token: str):
"""
Returns: True if the token is a number that comes *before* the unit.
E.g. '*erster* Halbsatz'
"""
return pre_numb_pattern.fullmatch(
token,
)
@staticmethod
def is_numb(token: str):
"""
Returns: True if the token is a 'numeric' value of the reference.
"""
return numb_pattern.fullmatch(
token,
)
@staticmethod
def fix_errors_in_citation(citation):
"""
Fix some common inconsistencies in the references such as double spaces.
"""
result = regex.sub(r"\s+", " ", citation)
result = regex.sub(r"§(?=\d)", "§ ", result)
result = regex.sub(r",\sbis\s", " bis ", result)
return result
@staticmethod
def split_citation_into_enum_parts(citation):
"""
| method
def split_parts_accidently_joined(reference_paths):
"""
Reformats the parsed references to separate accidentally joined references.
E.g. the original reference "§ 123 § 126" will not be split by
split_citation_into_enum_parts because the separation is erroneously not indicated by
a ',', 'or', etc. However, from the repeated unit '§' it can be inferred that the
citation contains references to two parts of statutes.
This function accounts for the case that the unit '§' or 'Art' appears twice in
the same reference path and splits the path into several elements.
"""
new_reference_paths = []
main_unit = (
"Art"
if Counter([part[0] for part in itertools.chain(*reference_paths)]).get(
"Art"
)
else "§"
)
for reference_path in reference_paths:
temp_path = []
for part in reference_path:
if part[0] == main_unit:
if len(temp_path):
new_reference_paths.append(temp_path)
temp_path = []
temp_path.append(part)
new_reference_paths.append(temp_path)
return new_reference_paths
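# --- Illustrative sketch (not part of the original module) ---------------------
# Simplified, standalone illustration of the splitting idea above: whenever the
# main unit ('§' here) reappears inside one parsed path, a new path is started.
# The real implementation additionally picks 'Art' as the main unit when present.
def _split_on_main_unit_sketch(path, main_unit="§"):
    result, current = [], []
    for unit, value in path:
        if unit == main_unit and current:
            result.append(current)
            current = []
        current.append([unit, value])
    result.append(current)
    return result

# "§ 123 § 126" parsed into one path is split into two reference paths:
assert _split_on_main_unit_sketch([["§", "123"], ["§", "126"]]) == [
    [["§", "123"]],
    [["§", "126"]],
]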
@staticmethod
def infer_units(reference_path, prev_reference_path):
"""
In some cases of an enumeration a numeric value is not directly prefixed by
its corresponding unit. E.g. in "§ 123 Abs. 1 S. 2, 3 S. 4" the "3"
is not prefixed with a unit. Instead it can be inferred by looking at the
whole citation that its unit is the next unit above "S.", hence "Abs.". These
inferred units are added to the parsed data.
"""
prev_path_units = [o[0] for o in prev_reference_path]
if reference_path[0][0]:
pass
elif len(reference_path) > 1:
try:
prev_unit_index = prev_path_units.index(reference_path[1][0])
# if not prev_unit_index > 0:
# print(f'Infer unit error: {citation}')
reference_path[0][0] = prev_path_units[prev_unit_index - 1]
except ValueError:
reference_path[0][0] = prev_path_units[-1]
else:
reference_path[0][0] = prev_path_units[-1]
try:
prev_unit_index = prev_path_units.index(reference_path[0][0])
reference_path[0:0] = prev_reference_path[:prev_unit_index]
except Exception:
reference_path[0:0] = prev_reference_path
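# --- Illustrative sketch (not part of the original module) ---------------------
# Standalone illustration of the unit inference above. For "§ 123 Abs. 1 S. 2,
# 3 S. 4" the second path starts as [None, '3']; because its next element is an
# 'S' and the previous path reads § > Abs > S, the missing unit resolves to the
# unit one level above 'S', i.e. 'Abs'. Only this single branch is sketched here.
def _infer_leading_unit_sketch(path, prev_path):
    prev_units = [unit for unit, _ in prev_path]
    if path[0][0] is None and len(path) > 1 and path[1][0] in prev_units:
        idx = prev_units.index(path[1][0])
        path[0][0] = prev_units[idx - 1] if idx > 0 else prev_units[0]
    return path

prev = [["§", "123"], ["Abs", "1"], ["S", "2"]]
curr = [[None, "3"], ["S", "4"]]
assert _infer_leading_unit_sketch(curr, prev)[0] == ["Abs", "3"]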
@staticmethod
def split_citation_part(string: str):
"""
Tokenizes a string. Tokens are identified as units or values. Pairs are
built to connect the units with their respective values. If the unit cannot
be identified (and must be inferred later) None is returned instead.
Args:
string: A string that is part of a reference and cites *one* part of a statute.
Returns: As a generator, tuples are returned, each containing the unit (or None)
and the respective value.
"""
# Tokenization
# fmt: off
string = regex.sub(
r"("
r"\d+(?>\.\d+)?[a-z]?|"
r"\b[ivx]+|"
r"\b | A citation can contain references to multiple parts of the law.
E.g. '§§ 20 und 35' or 'Art. 3 Abs. 1 Satz 1, Abs. 3 Satz 1'.
The citation is split into parts so that each referenced section of the law is
separated. E.g. '§§ 20' and '35' resp. 'Art. 3 Abs. 1 Satz 1' and
'Abs. 3 Satz 1'.
However, ranges are not split: e.g. "§§ 1 bis 10" will not be split.
"""
enum_parts = split_citation_into_parts_pattern.split(
citation,
)
# Split range
enum_parts = [
split_citation_into_range_parts_pattern.split(part) for part in enum_parts
]
return enum_parts
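# --- Illustrative sketch (not part of the original module) ---------------------
# Rough approximation of the enumeration split with the stdlib `re` module; the
# real patterns (split_citation_into_parts_pattern etc.) are more elaborate.
# Note that a range such as "§§ 1 bis 10" contains none of the separators and
# therefore stays in one piece.
import re

_enum_sep_sketch = re.compile(r"\s*,\s*|\s+und\s+|\s+oder\s+|\s+sowie\s+")
assert _enum_sep_sketch.split("§§ 20 und 35") == ["§§ 20", "35"]
assert _enum_sep_sketch.split("§§ 1 bis 10") == ["§§ 1 bis 10"]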
@static | identifier_body |
matrix.py | OpenMaya.MMatrix.identity:
if invertMatrix: matrix = matrix.inverse()
vector *= matrix
# Return new vector
return [vector.x,vector.y,vector.z]
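# --- Illustrative sketch (Maya-independent, not part of the original module) ---
# The transformAsPoint flag above corresponds to the homogeneous coordinate w:
# points use w=1 (translation applies), vectors use w=0 (translation is ignored).
# Plain-Python row-vector multiply against a 4x4 row-major matrix:
def _transform_sketch(vec3, matrix4x4, as_point=False):
    w = 1.0 if as_point else 0.0
    v = (vec3[0], vec3[1], vec3[2], w)
    return [sum(v[i] * matrix4x4[i][col] for i in range(4)) for col in range(3)]

# Translation-only matrix moving by (5, 0, 0):
translate_x5 = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [5, 0, 0, 1]]
assert _transform_sketch((1, 2, 3), translate_x5, as_point=True) == [6, 2, 3]
assert _transform_sketch((1, 2, 3), translate_x5, as_point=False) == [1, 2, 3]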
def getTranslation(matrix):
'''
Return the translation component of a matrix.
@param matrix: Matrix to extract translation from
@type matrix: maya.OpenMaya.MMatrix
'''
x = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],0)
y = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],1)
z = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],2)
return (x,y,z)
def getRotation(matrix,rotationOrder='xyz'):
'''
Return the rotation component of a matrix as euler (XYZ) values.
@param matrix: Matrix to extract rotation from
@type matrix: maya.OpenMaya.MMatrix
@param rotationOrder: Rotation order of the matrix
@type rotationOrder: str or int
'''
# Calculate radian constant
radian = 180.0/math.pi
# Check rotation order
if type(rotationOrder) == str:
rotationOrder = rotationOrder.lower()
rotateOrder = {'xyz':0,'yzx':1,'zxy':2,'xzy':3,'yxz':4,'zyx':5}
if not rotateOrder.has_key(rotationOrder):
raise Exception('Invalid rotation order supplied!')
rotationOrder = rotateOrder[rotationOrder]
else:
rotationOrder = int(rotationOrder)
# Get transformation matrix
transformMatrix = OpenMaya.MTransformationMatrix(matrix)
# Get Euler rotation from matrix
eulerRot = transformMatrix.eulerRotation()
# Reorder rotation
eulerRot.reorderIt(rotationOrder)
# Return XYZ rotation values
return (eulerRot.x*radian,eulerRot.y*radian,eulerRot.z*radian)
def buildRotation(aimVector,upVector=(0,1,0),aimAxis='x',upAxis='y'):
'''
Build rotation matrix from the specified inputs
@param aimVector: Aim vector for construction of rotation matrix (worldSpace)
@type aimVector: tuple or list
@param upVector: Up vector for construction of rotation matrix (worldSpace)
@type upVector: tuple or list
@param aimAxis: Aim axis for construction of rotation matrix
@type aimAxis: str
@param upAxis: Up axis for construction of rotation matrix
@type upAxis: str
'''
# Check negative axis
negAim = False
negUp = False
if aimAxis[0] == '-':
aimAxis = aimAxis[1]
negAim = True
if upAxis[0] == '-':
upAxis = upAxis[1]
negUp = True
# Check valid axis
axisList = ['x','y','z']
if not axisList.count(aimAxis): raise Exception('Aim axis is not valid!')
if not axisList.count(upAxis): raise Exception('Up axis is not valid!')
if aimAxis == upAxis: raise Exception('Aim and Up axis must be unique!')
# Determine cross axis
axisList.remove(aimAxis)
axisList.remove(upAxis)
crossAxis = axisList[0]
# Normalize aimVector
aimVector = mathUtils.normalizeVector(aimVector)
if negAim: aimVector = (-aimVector[0],-aimVector[1],-aimVector[2])
# Normalize upVector
upVector = mathUtils.normalizeVector(upVector)
if negUp: upVector = (-upVector[0],-upVector[1],-upVector[2])
# Get cross product vector
crossVector = (0,0,0)
if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'):
crossVector = mathUtils.crossProduct(upVector,aimVector)
else:
crossVector = mathUtils.crossProduct(aimVector,upVector)
# Recalculate upVector (orthogonalize)
if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'):
upVector = mathUtils.crossProduct(aimVector,crossVector)
else:
upVector = mathUtils.crossProduct(crossVector,aimVector)
# Build axis dictionary
axisDict={aimAxis: aimVector,upAxis: upVector,crossAxis: crossVector}
# Build rotation matrix
mat = buildMatrix(xAxis=axisDict['x'],yAxis=axisDict['y'],zAxis=axisDict['z'])
# Return rotation matrix
return mat
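# --- Illustrative sketch (Maya-independent, not part of the original module) ---
# Pure-Python version of the aim/up construction above for the default case
# aimAxis='x', upAxis='y': normalize the aim vector, build the third axis with a
# cross product, then re-orthogonalize the up vector. No Maya API is required.
import math

def _normalize_sketch(v):
    length = math.sqrt(sum(c * c for c in v))
    return tuple(c / length for c in v)

def _cross_sketch(a, b):
    return (a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0])

def _aim_up_basis_sketch(aim, up=(0, 1, 0)):
    x_axis = _normalize_sketch(aim)
    z_axis = _normalize_sketch(_cross_sketch(x_axis, up))   # cross(aim, up)
    y_axis = _cross_sketch(z_axis, x_axis)                  # re-orthogonalized up
    return x_axis, y_axis, z_axis

x_axis, y_axis, z_axis = _aim_up_basis_sketch((1.0, 1.0, 0.0))
assert abs(sum(a * b for a, b in zip(x_axis, y_axis))) < 1e-9  # orthogonal basis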
def inverseTransform(source,destination,translate=True,rotate=True,scale=True):
'''
Apply the inverse of a specified transform to another target transform.
@param source: The source transform that will supply the transformation
@type source: str
@param destination: The destination transform that will receive the inverse transformation
@type destination: str
@param translate: Apply inverse translate to destination transform
@type translate: bool
@param rotate: Apply inverse rotation to destination transform
@type rotate: bool
@param scale: Apply inverse scale to destination transform
@type scale: bool
'''
# ==========
# - Checks -
# ==========
if not mc.objExists(source): raise Exception('Transform "'+source+'" does not exist!!')
if not mc.objExists(destination): raise Exception('Transform "'+destination+'" does not exist!!')
# Load decomposeMatrix plugin
if not mc.pluginInfo('decomposeMatrix',q=True,l=True):
try: mc.loadPlugin('decomposeMatrix')
except: raise MissingPluginError('Unable to load "decomposeMatrix" plugin!!')
# =================================
# - Apply Inverse Transformations -
# =================================
# Create and name decomposeMatrix node
dcm = mc.createNode('decomposeMatrix',n=source+'_decomposeMatrix')
# Make connections
mc.connectAttr(source+'.inverseMatrix',dcm+'.inputMatrix',f=True)
if translate: mc.connectAttr(dcm+'.outputTranslate',destination+'.translate',f=True)
if rotate: mc.connectAttr(dcm+'.outputRotate',destination+'.rotate',f=True)
if scale: mc.connectAttr(dcm+'.outputScale',destination+'.scale',f=True)
# =================
# - Return Result -
# =================
return dcm
def fromList(valueList):
'''
Create matrix from value list.
@param valueList: List of matrix values
@type valueList: list
'''
# Check Value List
if len(valueList) != 16:
raise Exception('Invalid value list! Expecting 16 element, found '+str(len(valueList)))
# Create transformation matrix from input values
matrix = OpenMaya.MMatrix()
OpenMaya.MScriptUtil.createMatrixFromList(valueList,matrix)
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, valueList[0])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, valueList[1])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, valueList[2])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 3, valueList[3])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, valueList[4])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, valueList[5])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, valueList[6])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 3, valueList[7])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, valueList[8])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, valueList[9])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, valueList[10])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 3, valueList[11])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, valueList[12])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, valueList[13])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, valueList[14])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 3, valueList[15])
# Return Result
return matrix
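# --- Illustrative sketch (Maya-independent, not part of the original module) ---
# fromList/asList simply move 16 values between a flat row-major list and a 4x4
# matrix. The same round trip in plain Python, without the MScriptUtil plumbing:
def _from_list_sketch(values):
    if len(values) != 16:
        raise ValueError("Expected 16 elements, found %d" % len(values))
    return [values[row * 4:row * 4 + 4] for row in range(4)]

def _as_list_sketch(matrix4x4):
    return [value for row in matrix4x4 for value in row]

flat = list(range(16))
assert _as_list_sketch(_from_list_sketch(flat)) == flat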
def asList(matrix):
| '''
Return the specified matrix as a list
@param matrix: Matrix to return list for
@type matrix: maya.OpenMaya.MMatrix
'''
return [ matrix(0,0),matrix(0,1),matrix(0,2),matrix(0,3),
matrix(1,0),matrix(1,1),matrix(1,2),matrix(1,3),
matrix(2,0),matrix(2,1),matrix(2,2),matrix(2,3),
matrix(3,0),matrix(3,1),matrix(3,2),matrix(3,3), ] | identifier_body |
|
matrix.py | OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, yAxis[1])
OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, yAxis[2])
OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, zAxis[0])
OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, zAxis[1])
OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, zAxis[2])
OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, translate[0])
OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, translate[1])
OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, translate[2])
return matrix
def vectorMatrixMultiply(vector,matrix,transformAsPoint=False,invertMatrix=False):
'''
Transform a vector (or point) by a given transformation matrix.
@param vector: Vector or point to be transformed
@type vector: tuple/list
@param matrix: MMatrix object to provide the transformation
@type matrix: OpenMaya.MMatrix
@param transformAsPoint: Transform the vector as a point
@type transformAsPoint: bool
@param invertMatrix: Use the matrix inverse to transform the vector
@type invertMatrix: bool
'''
# Create MPoint/MVector object for transformation
if transformAsPoint: vector = OpenMaya.MPoint(vector[0],vector[1],vector[2],1.0)
else: vector = OpenMaya.MVector(vector[0],vector[1],vector[2])
# Check input is of type MMatrix
if type(matrix) != OpenMaya.MMatrix:
raise Exception('Matrix input variable is not of expected type! Expecting MMatrix, received '+str(type(matrix))+'!!')
# Transform vector
if matrix != OpenMaya.MMatrix.identity:
if invertMatrix: matrix = matrix.inverse()
vector *= matrix
# Return new vector
return [vector.x,vector.y,vector.z]
def getTranslation(matrix):
'''
Return the translation component of a matrix.
@param matrix: Matrix to extract translation from
@type matrix: maya.OpenMaya.MMatrix
'''
x = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],0)
y = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],1)
z = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],2)
return (x,y,z)
def getRotation(matrix,rotationOrder='xyz'):
'''
Return the rotation component of a matrix as euler (XYZ) values.
@param matrix: Matrix to extract rotation from
@type matrix: maya.OpenMaya.MMatrix
@param rotationOrder: Rotation order of the matrix
@type rotationOrder: str or int
'''
# Calculate radian constant
radian = 180.0/math.pi
# Check rotation order
if type(rotationOrder) == str:
rotationOrder = rotationOrder.lower()
rotateOrder = {'xyz':0,'yzx':1,'zxy':2,'xzy':3,'yxz':4,'zyx':5}
if not rotateOrder.has_key(rotationOrder):
raise Exception('Invalid rotation order supplied!')
rotationOrder = rotateOrder[rotationOrder]
else:
rotationOrder = int(rotationOrder)
# Get transformation matrix
transformMatrix = OpenMaya.MTransformationMatrix(matrix)
# Get Euler rotation from matrix
eulerRot = transformMatrix.eulerRotation()
# Reorder rotation
eulerRot.reorderIt(rotationOrder)
# Return XYZ rotation values
return (eulerRot.x*radian,eulerRot.y*radian,eulerRot.z*radian)
def buildRotation(aimVector,upVector=(0,1,0),aimAxis='x',upAxis='y'):
'''
Build rotation matrix from the specified inputs
@param aimVector: Aim vector for construction of rotation matrix (worldSpace)
@type aimVector: tuple or list
@param upVector: Up vector for construction of rotation matrix (worldSpace)
@type upVector: tuple or list
@param aimAxis: Aim axis for construction of rotation matrix
@type aimAxis: str
@param upAxis: Up axis for construction of rotation matrix
@type upAxis: str
'''
# Check negative axis
negAim = False
negUp = False
if aimAxis[0] == '-':
aimAxis = aimAxis[1]
negAim = True
if upAxis[0] == '-':
upAxis = upAxis[1]
negUp = True
# Check valid axis
axisList = ['x','y','z']
if not axisList.count(aimAxis): raise Exception('Aim axis is not valid!')
if not axisList.count(upAxis): raise Exception('Up axis is not valid!')
if aimAxis == upAxis: raise Exception('Aim and Up axis must be unique!')
# Determine cross axis
axisList.remove(aimAxis)
axisList.remove(upAxis)
crossAxis = axisList[0]
# Normalize aimVector
aimVector = mathUtils.normalizeVector(aimVector)
if negAim: aimVector = (-aimVector[0],-aimVector[1],-aimVector[2])
# Normalize upVector
upVector = mathUtils.normalizeVector(upVector)
if negUp: upVector = (-upVector[0],-upVector[1],-upVector[2])
# Get cross product vector
crossVector = (0,0,0)
if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'):
crossVector = mathUtils.crossProduct(upVector,aimVector)
else:
crossVector = mathUtils.crossProduct(aimVector,upVector)
# Recalculate upVector (orthogonalize)
if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'):
upVector = mathUtils.crossProduct(aimVector,crossVector)
else:
upVector = mathUtils.crossProduct(crossVector,aimVector)
# Build axis dictionary
axisDict={aimAxis: aimVector,upAxis: upVector,crossAxis: crossVector}
# Build rotation matrix
mat = buildMatrix(xAxis=axisDict['x'],yAxis=axisDict['y'],zAxis=axisDict['z'])
# Return rotation matrix
return mat
def inverseTransform(source,destination,translate=True,rotate=True,scale=True):
'''
Apply the inverse of a specified transform to another target transform.
@param source: The source transform that will supply the transformation
@type source: str
@param destination: The destination transform that will receive the inverse transformation
@type destination: str
@param translate: Apply inverse translate to destination transform
@type translate: bool
@param rotate: Apply inverse rotation to destination transform
@type rotate: bool
@param scale: Apply inverse scale to destination transform
@type scale: bool
'''
# ==========
# - Checks -
# ==========
if not mc.objExists(source): raise Exception('Transform "'+source+'" does not exist!!')
if not mc.objExists(destination): raise Exception('Transform "'+destination+'" does not exist!!')
# Load decomposeMatrix plugin
if not mc.pluginInfo('decomposeMatrix',q=True,l=True):
try: mc.loadPlugin('decomposeMatrix')
except: raise MissingPluginError('Unable to load "decomposeMatrix" plugin!!')
# =================================
# - Apply Inverse Transformations -
# =================================
# Create and name decomposeMatrix node
dcm = mc.createNode('decomposeMatrix',n=source+'_decomposeMatrix')
# Make connections
mc.connectAttr(source+'.inverseMatrix',dcm+'.inputMatrix',f=True)
if translate: mc.connectAttr(dcm+'.outputTranslate',destination+'.translate',f=True)
if rotate: mc.connectAttr(dcm+'.outputRotate',destination+'.rotate',f=True)
if scale: mc.connectAttr(dcm+'.outputScale',destination+'.scale',f=True)
# =================
# - Return Result -
# =================
return dcm
def fromList(valueList):
'''
Create matrix from value list.
@param valueList: List of matrix values
@type valueList: list
'''
# Check Value List
if len(valueList) != 16:
raise Exception('Invalid value list! Expecting 16 element, found '+str(len(valueList)))
# Create transformation matrix from input values
matrix = OpenMaya.MMatrix()
OpenMaya.MScriptUtil.createMatrixFromList(valueList,matrix)
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, valueList[0])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, valueList[1]) | #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, valueList[2])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 3, valueList[3])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, valueList[4]) | random_line_split |
|
matrix.py | Matrix: Use the matrix inverse to transform the vector
@type invertMatrix: bool
'''
# Create MPoint/MVector object for transformation
if transformAsPoint: vector = OpenMaya.MPoint(vector[0],vector[1],vector[2],1.0)
else: vector = OpenMaya.MVector(vector[0],vector[1],vector[2])
# Check input is of type MMatrix
if type(matrix) != OpenMaya.MMatrix:
raise Exception('Matrix input variable is not of expected type! Expecting MMatrix, received '+str(type(matrix))+'!!')
# Transform vector
if matrix != OpenMaya.MMatrix.identity:
if invertMatrix: matrix = matrix.inverse()
vector *= matrix
# Return new vector
return [vector.x,vector.y,vector.z]
def getTranslation(matrix):
'''
Return the translation component of a matrix.
@param matrix: Matrix to extract translation from
@type matrix: maya.OpenMaya.MMatrix
'''
x = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],0)
y = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],1)
z = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],2)
return (x,y,z)
def getRotation(matrix,rotationOrder='xyz'):
'''
Return the rotation component of a matrix as euler (XYZ) values.
@param matrix: Matrix to extract rotation from
@type matrix: maya.OpenMaya.MMatrix
@param rotationOrder: Rotation order of the matrix
@type rotationOrder: str or int
'''
# Calculate radian constant
radian = 180.0/math.pi
# Check rotation order
if type(rotationOrder) == str:
rotationOrder = rotationOrder.lower()
rotateOrder = {'xyz':0,'yzx':1,'zxy':2,'xzy':3,'yxz':4,'zyx':5}
if not rotateOrder.has_key(rotationOrder):
raise Exception('Invalid rotation order supplied!')
rotationOrder = rotateOrder[rotationOrder]
else:
rotationOrder = int(rotationOrder)
# Get transformation matrix
transformMatrix = OpenMaya.MTransformationMatrix(matrix)
# Get Euler rotation from matrix
eulerRot = transformMatrix.eulerRotation()
# Reorder rotation
eulerRot.reorderIt(rotationOrder)
# Return XYZ rotation values
return (eulerRot.x*radian,eulerRot.y*radian,eulerRot.z*radian)
def buildRotation(aimVector,upVector=(0,1,0),aimAxis='x',upAxis='y'):
'''
Build rotation matrix from the specified inputs
@param aimVector: Aim vector for construction of rotation matrix (worldSpace)
@type aimVector: tuple or list
@param upVector: Up vector for construction of rotation matrix (worldSpace)
@type upVector: tuple or list
@param aimAxis: Aim axis for construction of rotation matrix
@type aimAxis: str
@param upAxis: Up axis for construction of rotation matrix
@type upAxis: str
'''
# Check negative axis
negAim = False
negUp = False
if aimAxis[0] == '-':
aimAxis = aimAxis[1]
negAim = True
if upAxis[0] == '-':
upAxis = upAxis[1]
negUp = True
# Check valid axis
axisList = ['x','y','z']
if not axisList.count(aimAxis): raise Exception('Aim axis is not valid!')
if not axisList.count(upAxis): raise Exception('Up axis is not valid!')
if aimAxis == upAxis: raise Exception('Aim and Up axis must be unique!')
# Determine cross axis
axisList.remove(aimAxis)
axisList.remove(upAxis)
crossAxis = axisList[0]
# Normalize aimVector
aimVector = mathUtils.normalizeVector(aimVector)
if negAim: aimVector = (-aimVector[0],-aimVector[1],-aimVector[2])
# Normalize upVector
upVector = mathUtils.normalizeVector(upVector)
if negUp: upVector = (-upVector[0],-upVector[1],-upVector[2])
# Get cross product vector
crossVector = (0,0,0)
if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'):
crossVector = mathUtils.crossProduct(upVector,aimVector)
else:
crossVector = mathUtils.crossProduct(aimVector,upVector)
# Recalculate upVector (orthogonalize)
if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'):
upVector = mathUtils.crossProduct(aimVector,crossVector)
else:
upVector = mathUtils.crossProduct(crossVector,aimVector)
# Build axis dictionary
axisDict={aimAxis: aimVector,upAxis: upVector,crossAxis: crossVector}
# Build rotation matrix
mat = buildMatrix(xAxis=axisDict['x'],yAxis=axisDict['y'],zAxis=axisDict['z'])
# Return rotation matrix
return mat
def inverseTransform(source,destination,translate=True,rotate=True,scale=True):
'''
Apply the inverse of a specified transform to another target transform.
@param source: The source transform that will supply the transformation
@type source: str
@param destination: The destination transform that will receive the inverse transformation
@type destination: str
@param translate: Apply inverse translate to destination transform
@type translate: bool
@param rotate: Apply inverse rotation to destination transform
@type rotate: bool
@param scale: Apply inverse scale to destination transform
@type scale: bool
'''
# ==========
# - Checks -
# ==========
if not mc.objExists(source): raise Exception('Transform "'+source+'" does not exist!!')
if not mc.objExists(destination): raise Exception('Transform "'+destination+'" does not exist!!')
# Load decomposeMatrix plugin
if not mc.pluginInfo('decomposeMatrix',q=True,l=True):
try: mc.loadPlugin('decomposeMatrix')
except: raise MissingPluginError('Unable to load "decomposeMatrix" plugin!!')
# =================================
# - Apply Inverse Transformations -
# =================================
# Create and name decomposeMatrix node
dcm = mc.createNode('decomposeMatrix',n=source+'_decomposeMatrix')
# Make connections
mc.connectAttr(source+'.inverseMatrix',dcm+'.inputMatrix',f=True)
if translate: mc.connectAttr(dcm+'.outputTranslate',destination+'.translate',f=True)
if rotate: mc.connectAttr(dcm+'.outputRotate',destination+'.rotate',f=True)
if scale: mc.connectAttr(dcm+'.outputScale',destination+'.scale',f=True)
# =================
# - Return Result -
# =================
return dcm
def fromList(valueList):
'''
Create matrix from value list.
@param valueList: List of matrix values
@type valueList: list
'''
# Check Value List
if len(valueList) != 16:
raise Exception('Invalid value list! Expecting 16 element, found '+str(len(valueList)))
# Create transformation matrix from input values
matrix = OpenMaya.MMatrix()
OpenMaya.MScriptUtil.createMatrixFromList(valueList,matrix)
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, valueList[0])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, valueList[1])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, valueList[2])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 3, valueList[3])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, valueList[4])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, valueList[5])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, valueList[6])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 3, valueList[7])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, valueList[8])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, valueList[9])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, valueList[10])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 3, valueList[11])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, valueList[12])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, valueList[13])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, valueList[14])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 3, valueList[15])
# Return Result
return matrix
def | asList | identifier_name |
|
matrix.py | values for the matrix
@type translate: tuple/list
@param xAxis: xAxis of the matrix
@type xAxis: tuple/list
@param yAxis: yAxis of the matrix
@type yAxis: tuple/list
@param zAxis: zAxis of the matrix
@type zAxis: tuple/list
'''
# Create transformation matrix from input vectors
matrix = OpenMaya.MMatrix()
values = []
OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, xAxis[0])
OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, xAxis[1])
OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, xAxis[2])
OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, yAxis[0])
OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, yAxis[1])
OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, yAxis[2])
OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, zAxis[0])
OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, zAxis[1])
OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, zAxis[2])
OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, translate[0])
OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, translate[1])
OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, translate[2])
return matrix
def vectorMatrixMultiply(vector,matrix,transformAsPoint=False,invertMatrix=False):
'''
Transform a vector (or point) by a given transformation matrix.
@param vector: Vector or point to be transformed
@type vector: tuple/list
@param matrix: MMatrix object to provide the transformation
@type matrix: OpenMaya.MMatrix
@param transformAsPoint: Transform the vector as a point
@type transformAsPoint: bool
@param invertMatrix: Use the matrix inverse to transform the vector
@type invertMatrix: bool
'''
# Create MPoint/MVector object for transformation
if transformAsPoint: vector = OpenMaya.MPoint(vector[0],vector[1],vector[2],1.0)
else: vector = OpenMaya.MVector(vector[0],vector[1],vector[2])
# Check input is of type MMatrix
if type(matrix) != OpenMaya.MMatrix:
raise Exception('Matrix input variable is not of expected type! Expecting MMatrix, received '+str(type(matrix))+'!!')
# Transform vector
if matrix != OpenMaya.MMatrix.identity:
if invertMatrix: matrix = matrix.inverse()
vector *= matrix
# Return new vector
return [vector.x,vector.y,vector.z]
def getTranslation(matrix):
'''
Return the translation component of a matrix.
@param matrix: Matrix to extract translation from
@type matrix: maya.OpenMaya.MMatrix
'''
x = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],0)
y = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],1)
z = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],2)
return (x,y,z)
def getRotation(matrix,rotationOrder='xyz'):
'''
Return the rotation component of a matrix as euler (XYZ) values.
@param matrix: Matrix to extract rotation from
@type matrix: maya.OpenMaya.MMatrix
@param rotationOrder: Rotation order of the matrix
@type rotationOrder: str or int
'''
# Calculate radian constant
radian = 180.0/math.pi
# Check rotation order
if type(rotationOrder) == str:
rotationOrder = rotationOrder.lower()
rotateOrder = {'xyz':0,'yzx':1,'zxy':2,'xzy':3,'yxz':4,'zyx':5}
if not rotateOrder.has_key(rotationOrder):
raise Exception('Invalid rotation order supplied!')
rotationOrder = rotateOrder[rotationOrder]
else:
rotationOrder = int(rotationOrder)
# Get transformation matrix
transformMatrix = OpenMaya.MTransformationMatrix(matrix)
# Get Euler rotation from matrix
eulerRot = transformMatrix.eulerRotation()
# Reorder rotation
eulerRot.reorderIt(rotationOrder)
# Return XYZ rotation values
return (eulerRot.x*radian,eulerRot.y*radian,eulerRot.z*radian)
def buildRotation(aimVector,upVector=(0,1,0),aimAxis='x',upAxis='y'):
'''
Build rotation matrix from the specified inputs
@param aimVector: Aim vector for construction of rotation matrix (worldSpace)
@type aimVector: tuple or list
@param upVector: Up vector for construction of rotation matrix (worldSpace)
@type upVector: tuple or list
@param aimAxis: Aim axis for construction of rotation matrix
@type aimAxis: str
@param upAxis: Up axis for construction of rotation matrix
@type upAxis: str
'''
# Check negative axis
negAim = False
negUp = False
if aimAxis[0] == '-':
aimAxis = aimAxis[1]
negAim = True
if upAxis[0] == '-':
upAxis = upAxis[1]
negUp = True
# Check valid axis
axisList = ['x','y','z']
if not axisList.count(aimAxis): raise Exception('Aim axis is not valid!')
if not axisList.count(upAxis): raise Exception('Up axis is not valid!')
if aimAxis == upAxis: raise Exception('Aim and Up axis must be unique!')
# Determine cross axis
axisList.remove(aimAxis)
axisList.remove(upAxis)
crossAxis = axisList[0]
# Normalize aimVector
aimVector = mathUtils.normalizeVector(aimVector)
if negAim: aimVector = (-aimVector[0],-aimVector[1],-aimVector[2])
# Normalize upVector
upVector = mathUtils.normalizeVector(upVector)
if negUp: upVector = (-upVector[0],-upVector[1],-upVector[2])
# Get cross product vector
crossVector = (0,0,0)
if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'):
crossVector = mathUtils.crossProduct(upVector,aimVector)
else:
crossVector = mathUtils.crossProduct(aimVector,upVector)
# Recalculate upVector (orthogonalize)
if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'):
|
else:
upVector = mathUtils.crossProduct(crossVector,aimVector)
# Build axis dictionary
axisDict={aimAxis: aimVector,upAxis: upVector,crossAxis: crossVector}
# Build rotation matrix
mat = buildMatrix(xAxis=axisDict['x'],yAxis=axisDict['y'],zAxis=axisDict['z'])
# Return rotation matrix
return mat
def inverseTransform(source,destination,translate=True,rotate=True,scale=True):
'''
Apply the inverse of a specified transform to another target transform.
@param source: The source transform that will supply the transformation
@type source: str
@param destination: The destination transform that will receive the inverse transformation
@type destination: str
@param translate: Apply inverse translate to destination transform
@type translate: bool
@param rotate: Apply inverse rotation to destination transform
@type rotate: bool
@param scale: Apply inverse scale to destination transform
@type scale: bool
'''
# ==========
# - Checks -
# ==========
if not mc.objExists(source): raise Exception('Transform "'+source+'" does not exist!!')
if not mc.objExists(destination): raise Exception('Transform "'+destination+'" does not exist!!')
# Load decomposeMatrix plugin
if not mc.pluginInfo('decomposeMatrix',q=True,l=True):
try: mc.loadPlugin('decomposeMatrix')
except: raise MissingPluginError('Unable to load "decomposeMatrix" plugin!!')
# =================================
# - Apply Inverse Transformations -
# =================================
# Create and name decomposeMatrix node
dcm = mc.createNode('decomposeMatrix',n=source+'_decomposeMatrix')
# Make connections
mc.connectAttr(source+'.inverseMatrix',dcm+'.inputMatrix',f=True)
if translate: mc.connectAttr(dcm+'.outputTranslate',destination+'.translate',f=True)
if rotate: mc.connectAttr(dcm+'.outputRotate',destination+'.rotate',f=True)
if scale: mc.connectAttr(dcm+'.outputScale',destination+'.scale',f=True)
# =================
# - Return Result -
# =================
return dcm
def fromList(valueList):
'''
Create matrix from value list.
@param valueList: List of matrix values
@type valueList: list
'''
# Check Value List
if len(valueList) != | upVector = mathUtils.crossProduct(aimVector,crossVector) | conditional_block |
main.rs | ,
firing: false,
fire_counter: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
println!("rejecting connection; too many players");
return;
}
}; | // Set the user ID to some combination of the arena index and generation.
let id = idx.into_raw_parts().0 as u8;
players[idx].player.id = id;
// TODO(jack) Broadcast PlayerLeft messages.
// Broadcast PlayerJoined messages.
let mut tcp_txs: Vec<_> = players
.iter()
.filter(|(other_idx, _)| *other_idx != idx)
.map(|(_, p)| p.tcp_tx.clone())
.collect();
let msg = game::TcpClientMessage::PlayerJoined(id);
tokio::spawn(async move {
let join_handles = tcp_txs.iter_mut().map(|tcp_tx| tcp_tx.send(msg.clone()));
join_all(join_handles).await;
});
// Start tasks to read-from / write-to the TCP socket.
let (mut reader, mut writer) = stream.into_split();
tokio::spawn(async move {
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
let num_bytes = match reader.read(&mut buf).await {
Ok(0) => break,
Ok(MAX_PACKET_SIZE_PLUS_ONE) => break,
Err(err) => {
eprintln!("{}", err);
break;
}
Ok(num_bytes) => num_bytes,
};
match bincode::deserialize(&buf[..num_bytes]) {
Ok(msg) => match internal_tcp_tx.send((idx, Some(msg))).await {
Ok(_) => (),
Err(_) => break,
},
Err(err) => {
eprintln!("{}", err);
break;
}
};
}
// One consequence of every client publishing TCP packets to the same channel
// is that we don't know when any one disconnects.
// We signal it here with a `None`.
internal_tcp_tx.send((idx, None)).await.ok();
});
let random_bytes = players[idx].random_bytes.clone();
let update = game::WorldUpdate {
tick,
players: players.iter().map(|(_, p)| p.player).collect(),
bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
};
tokio::spawn(async move {
// Send the init packet.
// For now, this will just include a random sequence of bytes.
// We'll then wait for the random sequence of bytes via UDP to identify the client's external port number.
let bytes = bincode::serialize(&game::TcpClientMessage::Init(game::ClientInit {
id,
random_bytes,
update,
tick_rate: tick_rate as u8,
tick_zero,
}))
.unwrap();
if let Err(err) = writer.write_all(&bytes[..]).await {
eprintln!("{}", err);
return;
}
println!("wrote init message");
loop {
match rx.recv().await {
Some(msg) => {
if let Err(_) = writer
.write_all(bincode::serialize(&msg).unwrap().as_slice())
.await
{
break;
}
}
None => break,
};
}
});
}
fn step(players: &mut Arena<Player>, bullets: &mut Arena<Bullet>, dt: f64) {
// Apply player impulse.
for (_, player) in players.iter_mut() {
let acceleration = 64.0;
let max_velocity = 16.0;
let friction = 16.0;
// Acceleration ranges from `friction` to `friction + acceleration`,
// and is inversely proportional to the projection of the current velocity onto the input vector.
let acceleration_index = player.player.velocity.dot(&player.input) / max_velocity;
let acceleration_index = if acceleration_index < 0.0 {
0.0
} else {
acceleration_index.sqrt()
};
let adjusted_acceleration = friction + acceleration * (1.0 - acceleration_index);
player.player.velocity += adjusted_acceleration * dt * player.input;
let dampened_velocity_unclamped = player.player.velocity.magnitude() - dt * friction;
let dampened_velocity = if dampened_velocity_unclamped < 0.0 {
0.0
} else {
dampened_velocity_unclamped
};
let velocity_unit = player
.player
.velocity
.try_normalize(0.0)
.unwrap_or(Vector2::new(0.0, 0.0));
player.player.velocity = dampened_velocity * velocity_unit;
player.player.position += dt * player.player.velocity;
}
// Remove expired bullets.
let bullets_to_remove: Vec<_> = bullets
.iter()
.filter(|(_, b)| b.lifetime > 1.0)
.map(|(idx, _)| idx)
.collect();
for idx in bullets_to_remove.iter() {
bullets.remove(*idx);
}
// Fire bullets.
for (_, player) in players.iter_mut().filter(|(_, p)| p.firing) {
let rof = 30.0;
player.fire_counter += rof * dt;
if player.fire_counter >= 1.0 {
player.fire_counter %= 1.0;
let idx = match bullets.try_insert(Bullet {
bullet: game::Bullet {
id: 0,
player_id: player.player.id,
position: player.player.position,
angle: player.angle,
radius: 0.5,
},
velocity: 32.0 * Vector2::new(player.angle.cos(), player.angle.sin()),
lifetime: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
eprintln!("too many bullets!");
break;
}
};
// Set the user ID to the arena index.
let raw_parts = idx.into_raw_parts();
bullets[idx].bullet.id = ((raw_parts.0 & 10) | ((raw_parts.1 as usize) << 10)) as u16;
}
}
// Update bullets.
for (_, bullet) in bullets.iter_mut() {
bullet.bullet.position += dt * bullet.velocity;
bullet.lifetime += dt;
}
// Manage collisions.
// We have to collect the idxs to avoid borrowing `players`.
let idxs: Vec<_> = players.iter().map(|(idx, _)| idx).collect();
let idx_pairs = idxs
.iter()
.map(|a| idxs.iter().map(move |b| (a, b)))
.flatten()
.filter(|(a, b)| a.into_raw_parts().0 < b.into_raw_parts().0);
for (a, b) in idx_pairs {
let (a, b) = players.get2_mut(*a, *b);
let a = a.unwrap();
let b = b.unwrap();
let distance = match a.player.position - b.player.position {
v if v.x == 0.0 && v.y == 0.0 => Vector2::new(0.001, 0.001),
v => v,
};
let max_distance = a.player.radius + b.player.radius;
if distance.magnitude_squared() >= max_distance.powi(2) {
continue; // No collision.
}
let displacement_unit = distance.try_normalize(0.0).unwrap();
let displacement = displacement_unit * (max_distance - distance.magnitude());
a.player.position += 0.5 * displacement;
b.player.position += -0.5 * displacement;
let momentum = a.player.velocity.magnitude() + b.player.velocity.magnitude();
let elasticity = 2.0;
a.player.velocity = 0.5 * elasticity * momentum * displacement_unit;
b.player.velocity = -0.5 * elasticity * momentum * displacement_unit;
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let port: u32 = match args().nth(1).and_then(|s| s.parse().ok()) {
Some(port) => port,
None => {
eprintln!("Usage: {} PORT", args().nth(0).unwrap());
return Ok(());
}
};
let mut players = Arena::with_capacity(16);
let mut bullets = Arena::with_capacity(1024);
let tick_rate = 60;
let mut ticker = interval(Duration::from_secs(1) / tick_rate);
let snapshot_rate = 10;
let mut snapshot_ticker = interval(Duration::from_secs(1) / snapshot_rate);
let mut tcp_listener = TcpListener::bind(format!("0.0.0.0:{}", port)).await?;
let mut udp_socket = UdpSocket::bind(format!("0.0.0.0:{}", port)).await?;
// tcp_rx is our global receiver for TCP events.
// This means that each player holds a copy of tcp_tx which packets are passed to.
let (tcp_tx, mut tcp_rx) = channel(4);
let mut tick = Wrapping(0);
let tick_zero = SystemTime::now();
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf | random_line_split |
|
main.rs | ,
firing: false,
fire_counter: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
println!("rejecting connection; too many players");
return;
}
};
// Set the user ID to some combination of the arena index and generation.
let id = idx.into_raw_parts().0 as u8;
players[idx].player.id = id;
// TODO(jack) Broadcast PlayerLeft messages.
// Broadcast PlayerJoined messages.
let mut tcp_txs: Vec<_> = players
.iter()
.filter(|(other_idx, _)| *other_idx != idx)
.map(|(_, p)| p.tcp_tx.clone())
.collect();
let msg = game::TcpClientMessage::PlayerJoined(id);
tokio::spawn(async move {
let join_handles = tcp_txs.iter_mut().map(|tcp_tx| tcp_tx.send(msg.clone()));
join_all(join_handles).await;
});
// Start tasks to read-from / write-to the TCP socket.
let (mut reader, mut writer) = stream.into_split();
tokio::spawn(async move {
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
let num_bytes = match reader.read(&mut buf).await {
Ok(0) => break,
Ok(MAX_PACKET_SIZE_PLUS_ONE) => break,
Err(err) => {
eprintln!("{}", err);
break;
}
Ok(num_bytes) => num_bytes,
};
match bincode::deserialize(&buf[..num_bytes]) {
Ok(msg) => match internal_tcp_tx.send((idx, Some(msg))).await {
Ok(_) => (),
Err(_) => break,
},
Err(err) => {
eprintln!("{}", err);
break;
}
};
}
// One consequence of every client publishing TCP packets to the same channel
// is that we don't know when any one disconnects.
// We signal it here with a `None`.
internal_tcp_tx.send((idx, None)).await.ok();
});
let random_bytes = players[idx].random_bytes.clone();
let update = game::WorldUpdate {
tick,
players: players.iter().map(|(_, p)| p.player).collect(),
bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
};
tokio::spawn(async move {
// Send the init packet.
// For now, this will just include a random sequence of bytes.
// We'll then wait for the random sequence of bytes via UDP to identify the client's external port number.
let bytes = bincode::serialize(&game::TcpClientMessage::Init(game::ClientInit {
id,
random_bytes,
update,
tick_rate: tick_rate as u8,
tick_zero,
}))
.unwrap();
if let Err(err) = writer.write_all(&bytes[..]).await {
eprintln!("{}", err);
return;
}
println!("wrote init message");
loop {
match rx.recv().await {
Some(msg) => {
if let Err(_) = writer
.write_all(bincode::serialize(&msg).unwrap().as_slice())
.await
{
break;
}
}
None => break,
};
}
});
}
fn step(players: &mut Arena<Player>, bullets: &mut Arena<Bullet>, dt: f64) | 0.0
} else {
dampened_velocity_unclamped
};
let velocity_unit = player
.player
.velocity
.try_normalize(0.0)
.unwrap_or(Vector2::new(0.0, 0.0));
player.player.velocity = dampened_velocity * velocity_unit;
player.player.position += dt * player.player.velocity;
}
// Remove expired bullets.
let bullets_to_remove: Vec<_> = bullets
.iter()
.filter(|(_, b)| b.lifetime > 1.0)
.map(|(idx, _)| idx)
.collect();
for idx in bullets_to_remove.iter() {
bullets.remove(*idx);
}
// Fire bullets.
for (_, player) in players.iter_mut().filter(|(_, p)| p.firing) {
let rof = 30.0;
player.fire_counter += rof * dt;
if player.fire_counter >= 1.0 {
player.fire_counter %= 1.0;
let idx = match bullets.try_insert(Bullet {
bullet: game::Bullet {
id: 0,
player_id: player.player.id,
position: player.player.position,
angle: player.angle,
radius: 0.5,
},
velocity: 32.0 * Vector2::new(player.angle.cos(), player.angle.sin()),
lifetime: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
eprintln!("too many bullets!");
break;
}
};
// Set the user ID to the arena index.
let raw_parts = idx.into_raw_parts();
bullets[idx].bullet.id = ((raw_parts.0 & 10) | ((raw_parts.1 as usize) << 10)) as u16;
}
}
// Update bullets.
for (_, bullet) in bullets.iter_mut() {
bullet.bullet.position += dt * bullet.velocity;
bullet.lifetime += dt;
}
// Manage collisions.
// We have to collect the idxs to avoid borrowing `players`.
let idxs: Vec<_> = players.iter().map(|(idx, _)| idx).collect();
let idx_pairs = idxs
.iter()
.map(|a| idxs.iter().map(move |b| (a, b)))
.flatten()
.filter(|(a, b)| a.into_raw_parts().0 < b.into_raw_parts().0);
for (a, b) in idx_pairs {
let (a, b) = players.get2_mut(*a, *b);
let a = a.unwrap();
let b = b.unwrap();
let distance = match a.player.position - b.player.position {
v if v.x == 0.0 && v.y == 0.0 => Vector2::new(0.001, 0.001),
v => v,
};
let max_distance = a.player.radius + b.player.radius;
if distance.magnitude_squared() >= max_distance.powi(2) {
continue; // No collision.
}
let displacement_unit = distance.try_normalize(0.0).unwrap();
let displacement = displacement_unit * (max_distance - distance.magnitude());
a.player.position += 0.5 * displacement;
b.player.position += -0.5 * displacement;
let momentum = a.player.velocity.magnitude() + b.player.velocity.magnitude();
let elasticity = 2.0;
a.player.velocity = 0.5 * elasticity * momentum * displacement_unit;
b.player.velocity = -0.5 * elasticity * momentum * displacement_unit;
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let port: u32 = match args().nth(1).and_then(|s| s.parse().ok()) {
Some(port) => port,
None => {
eprintln!("Usage: {} PORT", args().nth(0).unwrap());
return Ok(());
}
};
let mut players = Arena::with_capacity(16);
let mut bullets = Arena::with_capacity(1024);
let tick_rate = 60;
let mut ticker = interval(Duration::from_secs(1) / tick_rate);
let snapshot_rate = 10;
let mut snapshot_ticker = interval(Duration::from_secs(1) / snapshot_rate);
let mut tcp_listener = TcpListener::bind(format!("0.0.0.0:{}", port)).await?;
let mut udp_socket = UdpSocket::bind(format!("0.0.0.0:{}", port)).await?;
// tcp_rx is our global receiver for TCP events.
// This means that each player holds a copy of tcp_tx which packets are passed to.
let (tcp_tx, mut tcp_rx) = channel(4);
let mut tick = Wrapping(0);
let tick_zero = SystemTime::now();
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut | {
// Apply player impulse.
for (_, player) in players.iter_mut() {
let acceleration = 64.0;
let max_velocity = 16.0;
let friction = 16.0;
// Acceleration ranges from `friction` to `friction + acceleration`,
// and is inversely proportional to the projection of the current velocity onto the input vector.
let acceleration_index = player.player.velocity.dot(&player.input) / max_velocity;
let acceleration_index = if acceleration_index < 0.0 {
0.0
} else {
acceleration_index.sqrt()
};
let adjusted_acceleration = friction + acceleration * (1.0 - acceleration_index);
player.player.velocity += adjusted_acceleration * dt * player.input;
let dampened_velocity_unclamped = player.player.velocity.magnitude() - dt * friction;
let dampened_velocity = if dampened_velocity_unclamped < 0.0 { | identifier_body |
main.rs | (
players: &mut Arena<Player>,
bullets: &Arena<Bullet>,
stream: TcpStream,
mut internal_tcp_tx: Sender<(Index, Option<game::TcpServerMessage>)>,
tick_rate: u32,
tick_zero: SystemTime,
tick: game::Tick,
) {
println!("connection!");
let (tx, mut rx) = channel(4);
let idx = match players.try_insert(Player {
player: game::Player {
id: 0, // Set below.
radius: 1.0,
position: Vector2::new(0.0, 0.0),
velocity: Vector2::new(0.0, 0.0),
},
tcp_tx: tx,
udp_addr: None,
random_bytes: rand::random(),
input: Vector2::new(0.0, 0.0),
angle: 0.0,
firing: false,
fire_counter: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
println!("rejecting connection; too many players");
return;
}
};
// Set the user ID to some combination of the arena index and generation.
let id = idx.into_raw_parts().0 as u8;
players[idx].player.id = id;
// TODO(jack) Broadcast PlayerLeft messages.
// Broadcast PlayerJoined messages.
let mut tcp_txs: Vec<_> = players
.iter()
.filter(|(other_idx, _)| *other_idx != idx)
.map(|(_, p)| p.tcp_tx.clone())
.collect();
let msg = game::TcpClientMessage::PlayerJoined(id);
tokio::spawn(async move {
let join_handles = tcp_txs.iter_mut().map(|tcp_tx| tcp_tx.send(msg.clone()));
join_all(join_handles).await;
});
// Start tasks to read-from / write-to the TCP socket.
let (mut reader, mut writer) = stream.into_split();
tokio::spawn(async move {
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
let num_bytes = match reader.read(&mut buf).await {
Ok(0) => break,
Ok(MAX_PACKET_SIZE_PLUS_ONE) => break,
Err(err) => {
eprintln!("{}", err);
break;
}
Ok(num_bytes) => num_bytes,
};
match bincode::deserialize(&buf[..num_bytes]) {
Ok(msg) => match internal_tcp_tx.send((idx, Some(msg))).await {
Ok(_) => (),
Err(_) => break,
},
Err(err) => {
eprintln!("{}", err);
break;
}
};
}
// One consequence of every client publishing TCP packets to the same channel
// is that we don't know when any one disconnects.
// We signal it here with a `None`.
internal_tcp_tx.send((idx, None)).await.ok();
});
let random_bytes = players[idx].random_bytes.clone();
let update = game::WorldUpdate {
tick,
players: players.iter().map(|(_, p)| p.player).collect(),
bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
};
tokio::spawn(async move {
// Send the init packet.
// For now, this will just include a random sequence of bytes.
// We'll then wait for the random sequence of bytes via UDP to identify the client's external port number.
let bytes = bincode::serialize(&game::TcpClientMessage::Init(game::ClientInit {
id,
random_bytes,
update,
tick_rate: tick_rate as u8,
tick_zero,
}))
.unwrap();
if let Err(err) = writer.write_all(&bytes[..]).await {
eprintln!("{}", err);
return;
}
println!("wrote init message");
loop {
match rx.recv().await {
Some(msg) => {
if let Err(_) = writer
.write_all(bincode::serialize(&msg).unwrap().as_slice())
.await
{
break;
}
}
None => break,
};
}
});
}
fn step(players: &mut Arena<Player>, bullets: &mut Arena<Bullet>, dt: f64) {
// Apply player impulse.
for (_, player) in players.iter_mut() {
let acceleration = 64.0;
let max_velocity = 16.0;
let friction = 16.0;
// Acceleration ranges from `friction` to `friction + acceleration`,
// and is inversely proportional to the projection of the current velocity onto the input vector.
let acceleration_index = player.player.velocity.dot(&player.input) / max_velocity;
let acceleration_index = if acceleration_index < 0.0 {
0.0
} else {
acceleration_index.sqrt()
};
let adjusted_acceleration = friction + acceleration * (1.0 - acceleration_index);
player.player.velocity += adjusted_acceleration * dt * player.input;
let dampened_velocity_unclamped = player.player.velocity.magnitude() - dt * friction;
let dampened_velocity = if dampened_velocity_unclamped < 0.0 {
0.0
} else {
dampened_velocity_unclamped
};
let velocity_unit = player
.player
.velocity
.try_normalize(0.0)
.unwrap_or(Vector2::new(0.0, 0.0));
player.player.velocity = dampened_velocity * velocity_unit;
player.player.position += dt * player.player.velocity;
}
// Remove expired bullets.
let bullets_to_remove: Vec<_> = bullets
.iter()
.filter(|(_, b)| b.lifetime > 1.0)
.map(|(idx, _)| idx)
.collect();
for idx in bullets_to_remove.iter() {
bullets.remove(*idx);
}
// Fire bullets.
for (_, player) in players.iter_mut().filter(|(_, p)| p.firing) {
let rof = 30.0;
player.fire_counter += rof * dt;
if player.fire_counter >= 1.0 {
player.fire_counter %= 1.0;
let idx = match bullets.try_insert(Bullet {
bullet: game::Bullet {
id: 0,
player_id: player.player.id,
position: player.player.position,
angle: player.angle,
radius: 0.5,
},
velocity: 32.0 * Vector2::new(player.angle.cos(), player.angle.sin()),
lifetime: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
eprintln!("too many bullets!");
break;
}
};
// Set the user ID to the arena index.
let raw_parts = idx.into_raw_parts();
bullets[idx].bullet.id = ((raw_parts.0 & 10) | ((raw_parts.1 as usize) << 10)) as u16;
}
}
// Update bullets.
for (_, bullet) in bullets.iter_mut() {
bullet.bullet.position += dt * bullet.velocity;
bullet.lifetime += dt;
}
// Manage collisions.
// We have to collect the idxs to avoid borrowing `players`.
let idxs: Vec<_> = players.iter().map(|(idx, _)| idx).collect();
let idx_pairs = idxs
.iter()
.map(|a| idxs.iter().map(move |b| (a, b)))
.flatten()
.filter(|(a, b)| a.into_raw_parts().0 < b.into_raw_parts().0);
for (a, b) in idx_pairs {
let (a, b) = players.get2_mut(*a, *b);
let a = a.unwrap();
let b = b.unwrap();
let distance = match a.player.position - b.player.position {
v if v.x == 0.0 && v.y == 0.0 => Vector2::new(0.001, 0.001),
v => v,
};
let max_distance = a.player.radius + b.player.radius;
if distance.magnitude_squared() >= max_distance.powi(2) {
continue; // No collision.
}
let displacement_unit = distance.try_normalize(0.0).unwrap();
let displacement = displacement_unit * (max_distance - distance.magnitude());
a.player.position += 0.5 * displacement;
b.player.position += -0.5 * displacement;
let momentum = a.player.velocity.magnitude() + b.player.velocity.magnitude();
let elasticity = 2.0;
a.player.velocity = 0.5 * elasticity * momentum * displacement_unit;
b.player.velocity = -0.5 * elasticity * momentum * displacement_unit;
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let port: u32 = match args().nth(1).and_then(|s| s.parse().ok()) {
Some(port) => port,
None => {
eprintln!("Usage: {} PORT", args().nth(0).unwrap());
return Ok(());
}
};
let mut players = Arena::with_capacity(16);
let mut bullets = Arena:: | accept | identifier_name |
|
Loan Eligibility Prediction_Benchmark_ML_Algorithm.py |
# ### Use GridSearchCV for finding the best model with the best hyperparameters
# - ### Build models
# - ### Create Parameter Grid
# - ### Run GridSearchCV
# - ### Choose the best model with the best hyperparameter
# - ### Give the best accuracy
# - ### Also, benchmark the best accuracy that you could get for every classification algorithm asked above
# #### Your final output will be something like this:
# - Best algorithm accuracy
# - Best hyperparameter accuracy for every algorithm
#
# **Table 1 (Algorithm wise best model with best hyperparameter)**
#
# Algorithm | Accuracy | Hyperparameters
# - DT
# - KNN
# - LR
# - SVM
# - RF
# - anyother
#
# **Table 2 (Best overall)**
#
# Algorithm | Accuracy | Hyperparameters
#
#
# ### Submission
# - Submit Notebook containing all saved ran code with outputs
# - Document with the above two tables (a generic GridSearchCV sketch of this workflow is shown right below this list)
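# A generic, minimal sketch of the benchmarking loop described above (not part of
# the original assignment code): each classifier gets its own parameter grid, is
# tuned with GridSearchCV, and the best score/params are collected for Table 1.
# A synthetic dataset stands in for the loan data here so the cell runs on its own.
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression

X_demo, y_demo = make_classification(n_samples=300, n_features=10, random_state=0)
search_space = {
    "DT": (DecisionTreeClassifier(random_state=42), {"max_depth": [3, 5, None]}),
    "KNN": (KNeighborsClassifier(), {"n_neighbors": [3, 5, 11]}),
    "LR": (LogisticRegression(max_iter=1000), {"C": [0.1, 1.0, 10.0]}),
}
best_models = {}
for name, (estimator, grid) in search_space.items():
    gs = GridSearchCV(estimator, grid, cv=3, n_jobs=-1)
    gs.fit(X_demo, y_demo)
    best_models[name] = (gs.best_score_, gs.best_params_)
print(best_models)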
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
# In[4]:
import seaborn as sns
sns.set_theme(style="darkgrid")
# Load the training data before plotting the class balance.
data = pd.read_csv('https://raw.githubusercontent.com/subashgandyer/datasets/main/loan_train.csv')
ax = sns.countplot(x="Loan_Status", data=data)
# Data is imbalanced
# In[5]:
data
# checking missing values
# In[6]:
data.isna().sum()
# In[7]:
data['Gender']= data['Gender'].fillna('U')
data['Married']= data['Married'].fillna('U')
data['Self_Employed']= data['Self_Employed'].fillna('U')
# In[8]:
data.isna().sum()
# In[9]:
from numpy import NaN
data[['LoanAmount','Loan_Amount_Term','Credit_History']] = data[['LoanAmount','Loan_Amount_Term','Credit_History']].replace(0, NaN)
# In[10]:
data.fillna(data.mean(), inplace=True)
data
# In[11]:
data.info()
# In[12]:
data.Dependents.value_counts()
# # Handling Categorical Variable
# In[13]:
from sklearn.preprocessing import LabelEncoder
labelencoder_X=LabelEncoder()
xm=data.apply(LabelEncoder().fit_transform)
xm
# In[14]:
X=xm.drop(['Loan_Status'], axis=1)
X
# In[15]:
y_new=xm.iloc[:,12]
y_new
# In[16]:
test=pd.read_csv('https://raw.githubusercontent.com/subashgandyer/datasets/main/loan_test.csv')
# In[17]:
test.isna().sum()
# In[18]:
test['Gender']= test['Gender'].fillna('U')
test['Self_Employed']= test['Self_Employed'].fillna('U')
# In[19]:
test.isna().sum()
# In[20]:
from numpy import NaN
test[['LoanAmount','Loan_Amount_Term','Credit_History']] = test[['LoanAmount','Loan_Amount_Term','Credit_History']].replace(0, NaN)
# In[21]:
test.fillna(test.mean(), inplace=True)
test.isna().sum()
# In[22]:
test.Dependents.value_counts()
# In[23]:
from sklearn.preprocessing import LabelEncoder
labelencoder_X=LabelEncoder()
xm_new=test.apply(LabelEncoder().fit_transform)
xm_new
# In[24]:
X.columns
# In[25]:
X_train_new=X[['Loan_ID', 'Gender', 'Married', 'Dependents', 'Education',
'Self_Employed', 'ApplicantIncome', 'CoapplicantIncome', 'LoanAmount',
'Loan_Amount_Term', 'Credit_History', 'Property_Area']]
X_train_new
y_train_new=xm.iloc[:,12]
y_train_new
# In[26]:
X_train_new
# In[27]:
y_train_new
# In[28]:
X_test_new= xm_new
X_test_new
# In[29]:
from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y_new, test_size = 0.30, random_state=5)
# In[30]:
n = 247
# Dropping last n rows using drop
y_new.drop(y_new.tail(n).index,
inplace = True)
# Printing dataframe
print(y_new)
# In[31]:
print(X_train_new.shape)
print(X_test_new.shape)
print(y_train_new.shape)
print(y_new.shape)
# In[32]:
y_test_new= y_new
y_test_new
# # Model Building
# In[33]:
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier()
# In[34]:
knn_classifier.fit(X_train_new, y_train_new)
# In[35]:
knn_predictions = knn_classifier.predict(X_test_new)
# In[37]:
print(knn_classifier.score(X_test_new, y_test_new))
print(knn_classifier.score(X_train_new, y_train_new))
# # knn_classifier
# In[38]:
from sklearn.model_selection import GridSearchCV
# In[39]:
grid_params= {'n_neighbors':[3,5,11,19],'weights':['uniform','distance'],'metric':['euclidean','manhattan']
}
# In[40]:
gridsearch= GridSearchCV(knn_classifier,grid_params, verbose=1,cv=3,n_jobs=-1)
# In[41]:
gs_results=gridsearch.fit(X_train_new, y_train_new)
# In[42]:
gs_results.best_score_
# In[43]:
gs_results.best_estimator_
# In[44]:
gs_results.best_params_
# # Random Forest With GridsearchCv
# In[45]:
from sklearn.ensemble import RandomForestClassifier
# Create the parameter grid based on the results of random search
param_grid = {
'bootstrap': [True],
'max_depth': [80, 90, 100, 110],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
'n_estimators': [100, 200, 300, 1000]
}
# Create a based model
rf = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
cv = 3, n_jobs = -1, verbose = 2)
# In[46]:
rf_results=grid_search.fit(X_train_new, y_train_new)
# In[58]:
rf_results.best_score_
# In[47]:
rf_results.best_params_
# # Decision Tree with GridSearchCv
# In[48]:
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
params = {'max_leaf_nodes': list(range(2, 100)), 'min_samples_split': [2, 3, 4]}
grid_search_cv = GridSearchCV(DecisionTreeClassifier(random_state=42), params, verbose=1, cv=3)
dt=grid_search_cv.fit(X_train_new, y_train_new)
# In[49]:
grid_search_cv.best_params_
# In[50]:
grid_search_cv.best_score_
# # Logistic Regression
# In[51]:
from sklearn.linear_model import LogisticRegression
import numpy as np
model=LogisticRegression()
# In[52]:
from sklearn.model_selection import RepeatedStratifiedKFold
# Create grid search object
solvers = ['newton-cg', 'lbfgs', 'liblinear']
penalty = ['l2']
c_values = [100, 10, 1.0, 0.1, 0.01]
# define grid search
grid = dict(solver=solvers,penalty=penalty,C=c_values)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
lg = grid_search.fit(X_train_new, y_train_new)
# In[54]:
lg.best_score_
# In[53]:
lg.best_params_
# # svm
# In[ ]:
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[ ]:
from sklearn.model_selection import GridSearchCV
param_grid = { 'C':[0.1,1,100,1000],'kernel':['rbf','poly','sigmoid','linear'],'degree':[1,2,3,4,5,6],'gamma': [1, 0.1, 0.01, 0.001, 0.0001]}
grid = GridSearchCV(SVC(),param_grid)
# In[ ]:
grid.fit(X_train_new,y_train_new)
# In[ | # - KNN
# - Logistic Regression
# - SVM
# - Random Forest
# - Any other algorithm of your choice | random_line_split |
|
main.rs | 're looking for... may be in logs, or in generated C++, or generated .rs")
.takes_value(true),
)
.arg(
Arg::new("creduce")
.long("creduce")
.value_name("PATH")
.help("creduce binary location")
.default_value("creduce")
.takes_value(true),
)
.arg(
Arg::new("output")
.short('o')
.long("output")
.value_name("OUTPUT")
.help("where to write minimized output")
.takes_value(true),
)
.arg(
Arg::new("gen-cmd")
.short('g')
.long("gen-cmd")
.value_name("GEN-CMD")
.help("where to find autocxx-gen")
.default_value(&default_gen_cmd)
.takes_value(true),
)
.arg(
Arg::new("rustc")
.long("rustc")
.value_name("RUSTC")
.help("where to find rustc")
.default_value("rustc")
.takes_value(true),
)
.arg(
Arg::new("rlibs")
.long("rlibs")
.value_name("LIBDIR")
.help("where to find rlibs/rmetas for cxx and autocxx")
.default_values(default_rlibs)
.multiple_values(true)
.takes_value(true),
)
.arg(
Arg::new("keep")
.short('k')
.long("keep-dir")
.help("keep the temporary directory for debugging purposes"),
)
.arg(
Arg::new("clang-args")
.short('c')
.long("clang-arg")
.multiple_occurrences(true)
.value_name("CLANG_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("creduce-args")
.long("creduce-arg")
.multiple_occurrences(true)
.value_name("CREDUCE_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("no-precompile")
.long("no-precompile")
.help("Do not precompile the C++ header before passing to autocxxgen"),
)
.arg(
Arg::new("no-postcompile")
.long("no-postcompile")
.help("Do not post-compile the C++ generated by autocxxgen"),
)
.arg(
Arg::new("no-rustc")
.long("no-rustc")
.help("Do not compile the rust generated by autocxxgen"),
)
.arg(
Arg::new("suppress-cxx-inclusions")
.long("suppress-cxx-inclusions")
.takes_value(true)
.possible_value("yes")
.possible_value("no")
.possible_value("auto")
.default_value("auto")
.help("Whether the preprocessed header already includes cxx.h. If so, we'll try to suppress the natural behavior of cxx to include duplicate definitions of some of the types within gen0.cc.")
)
.arg_required_else_help(true)
.get_matches();
run(matches).unwrap();
}
fn run(matches: ArgMatches) -> Result<(), std::io::Error> {
let keep_tmp = matches.is_present("keep");
let tmp_dir = TempDir::new()?;
let r = do_run(matches, &tmp_dir);
if keep_tmp {
println!(
"Keeping temp dir created at: {}",
tmp_dir.into_path().to_str().unwrap()
);
}
r
}
#[derive(serde_derive::Deserialize)]
struct | {
config: String,
header: String,
}
fn do_run(matches: ArgMatches, tmp_dir: &TempDir) -> Result<(), std::io::Error> {
let rs_path = tmp_dir.path().join("input.rs");
let concat_path = tmp_dir.path().join("concat.h");
match matches.subcommand_matches("repro") {
None => {
let submatches = matches.subcommand_matches("file").unwrap();
let incs: Vec<_> = submatches
.values_of("inc")
.unwrap_or_default()
.map(PathBuf::from)
.collect();
let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect();
let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect();
assert!(!headers.is_empty());
let listing_path = tmp_dir.path().join("listing.h");
create_concatenated_header(&headers, &listing_path)?;
announce_progress(&format!(
"Preprocessing {listing_path:?} to {concat_path:?}"
));
preprocess(&listing_path, &concat_path, &incs, &defs)?;
let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string())
.chain(
submatches
.values_of("directive")
.unwrap_or_default()
.map(|s| format!("{s}\n")),
)
.collect();
create_rs_file(&rs_path, &directives)?;
}
Some(submatches) => {
let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from(
submatches.value_of("repro").unwrap(),
))?)
.unwrap();
// Replace the headers in the config
let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap();
config.replace_included_headers("concat.h");
create_file(
&rs_path,
&format!("autocxx::include_cpp!({});", config.to_token_stream()),
)?;
if let Some(header) = submatches.value_of("header") {
std::fs::copy(PathBuf::from(header), &concat_path)?;
} else {
create_file(&concat_path, &case.header)?
}
}
}
let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() {
"yes" => true,
"no" => false,
"auto" => detect_cxx_h(&concat_path)?,
_ => panic!("unexpected value"),
};
let cxx_suppressions = if suppress_cxx_classes {
get_cxx_suppressions()
} else {
Vec::new()
};
let extra_clang_args: Vec<_> = matches
.values_of("clang-args")
.unwrap_or_default()
.map(Cow::Borrowed)
.chain(cxx_suppressions.into_iter().map(Cow::Owned))
.collect();
let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec();
let gen_cmd = matches.value_of("gen-cmd").unwrap();
if !Path::new(gen_cmd).exists() {
panic!(
"autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen"
);
}
run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?;
// Create and run an interestingness test which does not filter its output through grep.
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test");
std::fs::create_dir(&demo_interestingness_test_dir).unwrap();
let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
None,
&rs_path,
&extra_clang_args,
)?;
let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h");
std::fs::copy(&concat_path, demo_dir_concat_path).unwrap();
run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap();
// Now the main interestingness test
let interestingness_test = tmp_dir.path().join("test.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
Some(matches.value_of("problem").unwrap()),
&rs_path,
&extra_clang_args,
)?;
run_creduce(
matches.value_of("creduce").unwrap(),
&interestingness_test,
&concat_path,
matches.values_of("creduce-args").unwrap_or_default(),
);
announce_progress("creduce completed");
let output_path = matches.value_of("output");
match output_path {
None => print_minimized_case(&concat_path)?,
Some(output_path) => {
std::fs::copy(&concat_path, PathBuf::from(output_path))?;
}
};
Ok(())
}
/// Try to detect whether the preprocessed source code already contains
/// a preprocessed version of cxx.h. This is hard because all the comments
/// and preprocessor symbols may have been removed, and in fact if we're
/// part way through reduction, parts of the code may have been removed too.
fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error> {
let haystack = std::fs::read_to_string(concat_path)?;
Ok(["class Box", "class Vec", "class Slice"]
.iter()
.all(|needle| haystack.contains(needle)))
}
fn | ReproCase | identifier_name |
main.rs | 're looking for... may be in logs, or in generated C++, or generated .rs")
.takes_value(true),
)
.arg(
Arg::new("creduce")
.long("creduce")
.value_name("PATH")
.help("creduce binary location")
.default_value("creduce")
.takes_value(true),
)
.arg(
Arg::new("output")
.short('o')
.long("output")
.value_name("OUTPUT")
.help("where to write minimized output")
.takes_value(true),
)
.arg(
Arg::new("gen-cmd")
.short('g')
.long("gen-cmd")
.value_name("GEN-CMD")
.help("where to find autocxx-gen")
.default_value(&default_gen_cmd)
.takes_value(true),
)
.arg(
Arg::new("rustc")
.long("rustc")
.value_name("RUSTC")
.help("where to find rustc")
.default_value("rustc")
.takes_value(true),
)
.arg(
Arg::new("rlibs")
.long("rlibs")
.value_name("LIBDIR")
.help("where to find rlibs/rmetas for cxx and autocxx")
.default_values(default_rlibs)
.multiple_values(true)
.takes_value(true),
)
.arg(
Arg::new("keep")
.short('k')
.long("keep-dir")
.help("keep the temporary directory for debugging purposes"),
)
.arg(
Arg::new("clang-args")
.short('c')
.long("clang-arg")
.multiple_occurrences(true)
.value_name("CLANG_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("creduce-args")
.long("creduce-arg")
.multiple_occurrences(true)
.value_name("CREDUCE_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("no-precompile")
.long("no-precompile")
.help("Do not precompile the C++ header before passing to autocxxgen"),
)
.arg(
Arg::new("no-postcompile")
.long("no-postcompile")
.help("Do not post-compile the C++ generated by autocxxgen"),
)
.arg(
Arg::new("no-rustc")
.long("no-rustc")
.help("Do not compile the rust generated by autocxxgen"),
)
.arg(
Arg::new("suppress-cxx-inclusions")
.long("suppress-cxx-inclusions")
.takes_value(true)
.possible_value("yes")
.possible_value("no")
.possible_value("auto")
.default_value("auto")
.help("Whether the preprocessed header already includes cxx.h. If so, we'll try to suppress the natural behavior of cxx to include duplicate definitions of some of the types within gen0.cc.")
)
.arg_required_else_help(true)
.get_matches();
run(matches).unwrap();
}
fn run(matches: ArgMatches) -> Result<(), std::io::Error> {
let keep_tmp = matches.is_present("keep");
let tmp_dir = TempDir::new()?;
let r = do_run(matches, &tmp_dir);
if keep_tmp {
println!(
"Keeping temp dir created at: {}",
tmp_dir.into_path().to_str().unwrap()
);
}
r
}
#[derive(serde_derive::Deserialize)]
struct ReproCase {
config: String,
header: String,
}
fn do_run(matches: ArgMatches, tmp_dir: &TempDir) -> Result<(), std::io::Error> {
let rs_path = tmp_dir.path().join("input.rs");
let concat_path = tmp_dir.path().join("concat.h");
match matches.subcommand_matches("repro") {
None => {
let submatches = matches.subcommand_matches("file").unwrap();
let incs: Vec<_> = submatches
.values_of("inc")
.unwrap_or_default()
.map(PathBuf::from)
.collect();
let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect();
let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect();
assert!(!headers.is_empty());
let listing_path = tmp_dir.path().join("listing.h");
create_concatenated_header(&headers, &listing_path)?;
announce_progress(&format!(
"Preprocessing {listing_path:?} to {concat_path:?}"
));
preprocess(&listing_path, &concat_path, &incs, &defs)?;
let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string())
.chain(
submatches
.values_of("directive")
.unwrap_or_default()
.map(|s| format!("{s}\n")),
)
.collect();
create_rs_file(&rs_path, &directives)?;
}
Some(submatches) => {
let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from(
submatches.value_of("repro").unwrap(),
))?)
.unwrap();
// Replace the headers in the config
let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap();
config.replace_included_headers("concat.h");
create_file(
&rs_path,
&format!("autocxx::include_cpp!({});", config.to_token_stream()),
)?;
if let Some(header) = submatches.value_of("header") {
std::fs::copy(PathBuf::from(header), &concat_path)?;
} else {
create_file(&concat_path, &case.header)?
}
}
}
let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() {
"yes" => true,
"no" => false,
"auto" => detect_cxx_h(&concat_path)?,
_ => panic!("unexpected value"),
};
let cxx_suppressions = if suppress_cxx_classes {
get_cxx_suppressions()
} else {
Vec::new()
};
let extra_clang_args: Vec<_> = matches
.values_of("clang-args")
.unwrap_or_default()
.map(Cow::Borrowed)
.chain(cxx_suppressions.into_iter().map(Cow::Owned))
.collect();
let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec();
let gen_cmd = matches.value_of("gen-cmd").unwrap();
if !Path::new(gen_cmd).exists() {
panic!(
"autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen"
);
}
run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?;
// Create and run an interestingness test which does not filter its output through grep.
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test");
std::fs::create_dir(&demo_interestingness_test_dir).unwrap();
let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
None,
&rs_path,
&extra_clang_args,
)?;
let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h");
std::fs::copy(&concat_path, demo_dir_concat_path).unwrap();
run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap();
// Now the main interestingness test
let interestingness_test = tmp_dir.path().join("test.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
Some(matches.value_of("problem").unwrap()),
&rs_path,
&extra_clang_args,
)?;
run_creduce(
matches.value_of("creduce").unwrap(),
&interestingness_test,
&concat_path,
matches.values_of("creduce-args").unwrap_or_default(),
);
announce_progress("creduce completed");
let output_path = matches.value_of("output");
match output_path {
None => print_minimized_case(&concat_path)?,
Some(output_path) => {
std::fs::copy(&concat_path, PathBuf::from(output_path))?;
}
};
Ok(())
}
/// Try to detect whether the preprocessed source code already contains
/// a preprocessed version of cxx.h. This is hard because all the comments
/// and preprocessor symbols may have been removed, and in fact if we're
/// part way through reduction, parts of the code may have been removed too.
fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error> | {
let haystack = std::fs::read_to_string(concat_path)?;
Ok(["class Box", "class Vec", "class Slice"]
.iter()
.all(|needle| haystack.contains(needle)))
} | identifier_body |
|
main.rs | pro") {
None => {
let submatches = matches.subcommand_matches("file").unwrap();
let incs: Vec<_> = submatches
.values_of("inc")
.unwrap_or_default()
.map(PathBuf::from)
.collect();
let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect();
let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect();
assert!(!headers.is_empty());
let listing_path = tmp_dir.path().join("listing.h");
create_concatenated_header(&headers, &listing_path)?;
announce_progress(&format!(
"Preprocessing {listing_path:?} to {concat_path:?}"
));
preprocess(&listing_path, &concat_path, &incs, &defs)?;
let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string())
.chain(
submatches
.values_of("directive")
.unwrap_or_default()
.map(|s| format!("{s}\n")),
)
.collect();
create_rs_file(&rs_path, &directives)?;
}
Some(submatches) => {
let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from(
submatches.value_of("repro").unwrap(),
))?)
.unwrap();
// Replace the headers in the config
let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap();
config.replace_included_headers("concat.h");
create_file(
&rs_path,
&format!("autocxx::include_cpp!({});", config.to_token_stream()),
)?;
if let Some(header) = submatches.value_of("header") {
std::fs::copy(PathBuf::from(header), &concat_path)?;
} else {
create_file(&concat_path, &case.header)?
}
}
}
let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() {
"yes" => true,
"no" => false,
"auto" => detect_cxx_h(&concat_path)?,
_ => panic!("unexpected value"),
};
let cxx_suppressions = if suppress_cxx_classes {
get_cxx_suppressions()
} else {
Vec::new()
};
let extra_clang_args: Vec<_> = matches
.values_of("clang-args")
.unwrap_or_default()
.map(Cow::Borrowed)
.chain(cxx_suppressions.into_iter().map(Cow::Owned))
.collect();
let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec();
let gen_cmd = matches.value_of("gen-cmd").unwrap();
if !Path::new(gen_cmd).exists() {
panic!(
"autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen"
);
}
run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?;
// Create and run an interestingness test which does not filter its output through grep.
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test");
std::fs::create_dir(&demo_interestingness_test_dir).unwrap();
let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
None,
&rs_path,
&extra_clang_args,
)?;
let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h");
std::fs::copy(&concat_path, demo_dir_concat_path).unwrap();
run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap();
// Now the main interestingness test
let interestingness_test = tmp_dir.path().join("test.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
Some(matches.value_of("problem").unwrap()),
&rs_path,
&extra_clang_args,
)?;
run_creduce(
matches.value_of("creduce").unwrap(),
&interestingness_test,
&concat_path,
matches.values_of("creduce-args").unwrap_or_default(),
);
announce_progress("creduce completed");
let output_path = matches.value_of("output");
match output_path {
None => print_minimized_case(&concat_path)?,
Some(output_path) => {
std::fs::copy(&concat_path, PathBuf::from(output_path))?;
}
};
Ok(())
}
/// Try to detect whether the preprocessed source code already contains
/// a preprocessed version of cxx.h. This is hard because all the comments
/// and preprocessor symbols may have been removed, and in fact if we're
/// part way through reduction, parts of the code may have been removed too.
fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error> {
let haystack = std::fs::read_to_string(concat_path)?;
Ok(["class Box", "class Vec", "class Slice"]
.iter()
.all(|needle| haystack.contains(needle)))
}
fn announce_progress(msg: &str) {
println!("=== {msg} ===");
}
fn print_minimized_case(concat_path: &Path) -> Result<(), std::io::Error> {
announce_progress("Completed. Minimized test case:");
let contents = std::fs::read_to_string(concat_path)?;
println!("{contents}");
Ok(())
}
/// Arguments we pass to creduce if supported. This pass always seems to cause a crash
/// as far as I can tell, so always exclude it. It may be environment-dependent,
/// of course, but as I'm the primary user of this tool I am ruthlessly removing it.
const REMOVE_PASS_LINE_MARKERS: &[&str] = &["--remove-pass", "pass_line_markers", "*"];
const SKIP_INITIAL_PASSES: &[&str] = &["--skip-initial-passes"];
fn creduce_supports_remove_pass(creduce_cmd: &str) -> bool {
let cmd = std::process::Command::new(creduce_cmd)
.arg("--help")
.output();
let msg = match cmd {
Err(error) => panic!("failed to run creduce. creduce_cmd = {creduce_cmd}. hint: autocxx-reduce --creduce /path/to/creduce. error = {error}"),
Ok(result) => result.stdout
};
let msg = std::str::from_utf8(&msg).unwrap();
msg.contains("--remove-pass")
}
fn run_creduce<'a>(
creduce_cmd: &str,
interestingness_test: &'a Path,
concat_path: &'a Path,
creduce_args: impl Iterator<Item = &'a str>,
) {
announce_progress("creduce");
let args = std::iter::once(interestingness_test.to_str().unwrap())
.chain(std::iter::once(concat_path.to_str().unwrap()))
.chain(creduce_args)
.chain(
if creduce_supports_remove_pass(creduce_cmd) {
REMOVE_PASS_LINE_MARKERS
} else {
SKIP_INITIAL_PASSES
}
.iter()
.copied(),
)
.collect::<Vec<_>>();
println!("Command: {} {}", creduce_cmd, args.join(" "));
std::process::Command::new(creduce_cmd)
.args(args)
.status()
.expect("failed to creduce");
}
fn run_sample_gen_cmd(
gen_cmd: &str,
rs_file: &Path,
tmp_dir: &Path,
extra_clang_args: &[&str],
) -> Result<(), std::io::Error> {
let args = format_gen_cmd(rs_file, tmp_dir.to_str().unwrap(), extra_clang_args);
let args = args.collect::<Vec<_>>();
let args_str = args.join(" ");
announce_progress(&format!("Running sample gen cmd: {gen_cmd} {args_str}"));
std::process::Command::new(gen_cmd).args(args).status()?;
Ok(())
}
fn run_demo_interestingness_test(demo_dir: &Path, test: &Path) -> Result<(), std::io::Error> {
announce_progress(&format!(
"Running demo interestingness test in {}",
demo_dir.to_string_lossy()
));
std::process::Command::new(test)
.current_dir(demo_dir)
.status()?;
Ok(())
}
fn format_gen_cmd<'a>(
rs_file: &Path,
dir: &str,
extra_clang_args: &'a [&str],
) -> impl Iterator<Item = String> + 'a {
let args = [
"-o".to_string(),
dir.to_string(),
"-I".to_string(),
dir.to_string(),
rs_file.to_str().unwrap().to_string(),
"--gen-rs-include".to_string(),
"--gen-cpp".to_string(),
"--suppress-system-headers".to_string(),
"--".to_string(),
]
.to_vec();
args.into_iter()
.chain(extra_clang_args.iter().map(|s| s.to_string()))
}
fn create_interestingness_test(
matches: &ArgMatches, | gen_cmd: &str, | random_line_split |
|
main.rs | 're looking for... may be in logs, or in generated C++, or generated .rs")
.takes_value(true),
)
.arg(
Arg::new("creduce")
.long("creduce")
.value_name("PATH")
.help("creduce binary location")
.default_value("creduce")
.takes_value(true),
)
.arg(
Arg::new("output")
.short('o')
.long("output")
.value_name("OUTPUT")
.help("where to write minimized output")
.takes_value(true),
)
.arg(
Arg::new("gen-cmd")
.short('g')
.long("gen-cmd")
.value_name("GEN-CMD")
.help("where to find autocxx-gen")
.default_value(&default_gen_cmd)
.takes_value(true),
)
.arg(
Arg::new("rustc")
.long("rustc")
.value_name("RUSTC")
.help("where to find rustc")
.default_value("rustc")
.takes_value(true),
)
.arg(
Arg::new("rlibs")
.long("rlibs")
.value_name("LIBDIR")
.help("where to find rlibs/rmetas for cxx and autocxx")
.default_values(default_rlibs)
.multiple_values(true)
.takes_value(true),
)
.arg(
Arg::new("keep")
.short('k')
.long("keep-dir")
.help("keep the temporary directory for debugging purposes"),
)
.arg(
Arg::new("clang-args")
.short('c')
.long("clang-arg")
.multiple_occurrences(true)
.value_name("CLANG_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("creduce-args")
.long("creduce-arg")
.multiple_occurrences(true)
.value_name("CREDUCE_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("no-precompile")
.long("no-precompile")
.help("Do not precompile the C++ header before passing to autocxxgen"),
)
.arg(
Arg::new("no-postcompile")
.long("no-postcompile")
.help("Do not post-compile the C++ generated by autocxxgen"),
)
.arg(
Arg::new("no-rustc")
.long("no-rustc")
.help("Do not compile the rust generated by autocxxgen"),
)
.arg(
Arg::new("suppress-cxx-inclusions")
.long("suppress-cxx-inclusions")
.takes_value(true)
.possible_value("yes")
.possible_value("no")
.possible_value("auto")
.default_value("auto")
.help("Whether the preprocessed header already includes cxx.h. If so, we'll try to suppress the natural behavior of cxx to include duplicate definitions of some of the types within gen0.cc.")
)
.arg_required_else_help(true)
.get_matches();
run(matches).unwrap();
}
fn run(matches: ArgMatches) -> Result<(), std::io::Error> {
let keep_tmp = matches.is_present("keep");
let tmp_dir = TempDir::new()?;
let r = do_run(matches, &tmp_dir);
if keep_tmp {
println!(
"Keeping temp dir created at: {}",
tmp_dir.into_path().to_str().unwrap()
);
}
r
}
#[derive(serde_derive::Deserialize)]
struct ReproCase {
config: String,
header: String,
}
fn do_run(matches: ArgMatches, tmp_dir: &TempDir) -> Result<(), std::io::Error> {
let rs_path = tmp_dir.path().join("input.rs");
let concat_path = tmp_dir.path().join("concat.h");
match matches.subcommand_matches("repro") {
None => {
let submatches = matches.subcommand_matches("file").unwrap();
let incs: Vec<_> = submatches
.values_of("inc")
.unwrap_or_default()
.map(PathBuf::from)
.collect();
let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect();
let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect();
assert!(!headers.is_empty());
let listing_path = tmp_dir.path().join("listing.h");
create_concatenated_header(&headers, &listing_path)?;
announce_progress(&format!(
"Preprocessing {listing_path:?} to {concat_path:?}"
));
preprocess(&listing_path, &concat_path, &incs, &defs)?;
let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string())
.chain(
submatches
.values_of("directive")
.unwrap_or_default()
.map(|s| format!("{s}\n")),
)
.collect();
create_rs_file(&rs_path, &directives)?;
}
Some(submatches) => {
let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from(
submatches.value_of("repro").unwrap(),
))?)
.unwrap();
// Replace the headers in the config
let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap();
config.replace_included_headers("concat.h");
create_file(
&rs_path,
&format!("autocxx::include_cpp!({});", config.to_token_stream()),
)?;
if let Some(header) = submatches.value_of("header") {
std::fs::copy(PathBuf::from(header), &concat_path)?;
} else {
create_file(&concat_path, &case.header)?
}
}
}
let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() {
"yes" => true,
"no" => false,
"auto" => detect_cxx_h(&concat_path)?,
_ => panic!("unexpected value"),
};
let cxx_suppressions = if suppress_cxx_classes {
get_cxx_suppressions()
} else {
Vec::new()
};
let extra_clang_args: Vec<_> = matches
.values_of("clang-args")
.unwrap_or_default()
.map(Cow::Borrowed)
.chain(cxx_suppressions.into_iter().map(Cow::Owned))
.collect();
let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec();
let gen_cmd = matches.value_of("gen-cmd").unwrap();
if !Path::new(gen_cmd).exists() {
panic!(
"autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen"
);
}
run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?;
// Create and run an interestingness test which does not filter its output through grep.
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test");
std::fs::create_dir(&demo_interestingness_test_dir).unwrap();
let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
None,
&rs_path,
&extra_clang_args,
)?;
let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h");
std::fs::copy(&concat_path, demo_dir_concat_path).unwrap();
run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap();
// Now the main interestingness test
let interestingness_test = tmp_dir.path().join("test.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
Some(matches.value_of("problem").unwrap()),
&rs_path,
&extra_clang_args,
)?;
run_creduce(
matches.value_of("creduce").unwrap(),
&interestingness_test,
&concat_path,
matches.values_of("creduce-args").unwrap_or_default(),
);
announce_progress("creduce completed");
let output_path = matches.value_of("output");
match output_path {
None => print_minimized_case(&concat_path)?,
Some(output_path) => |
};
Ok(())
}
/// Try to detect whether the preprocessed source code already contains
/// a preprocessed version of cxx.h. This is hard because all the comments
/// and preprocessor symbols may have been removed, and in fact if we're
/// part way through reduction, parts of the code may have been removed too.
fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error> {
let haystack = std::fs::read_to_string(concat_path)?;
Ok(["class Box", "class Vec", "class Slice"]
.iter()
.all(|needle| haystack.contains(needle)))
}
| {
std::fs::copy(&concat_path, PathBuf::from(output_path))?;
} | conditional_block |
cn.js | () {
return (
<Layout
title="华炎魔方,华炎办公,审批王,低代码,零代码,快速开发工具,企业PaaS平台"
description="华炎魔方是一款随需应变的管理软件开发工具,旨在通过其强大的敏捷性、灵活性和开放性帮助企业创新、扩展和集成企业业务系统。基于该平台,您可以快速创建智能化、移动化的企业应用。"
keywords={["低代码,低代码开发,低代码开发平台,开源低代码开发平台,快速开发平台,快速开发工具,paas,零代码,零代码开发,零代码开发平台"]}
>
<section className="flex bg-cover bg-no-repeat bg-gray-100">
<div className="mx-auto max-w-screen-xl px-4 sm:px-6 lg:px-8 my-16">
<div className="lg:grid lg:grid-cols-12 lg:gap-8">
<div className="sm:text-center md:max-w-2xl md:mx-auto lg:col-span-6 lg:text-left">
{/* <div className="text-sm font-semibold uppercase tracking-wide text-gray-700 sm:text-base lg:text-sm xl:text-base">
新一代低代码开发平台
</div> */}
<h2 className="mt-1 text-4xl tracking-tight leading-10 font-extrabold text-gray-900 sm:leading-none sm:text-4xl lg:text-5xl xl:text-6xl">
高效搭建企业应用的
<br className="hidden md:inline"/>
<span className="text-green-700">神奇魔方</span>
</h2>
<p className="mt-3 text-base text-gray-700 sm:mt-5 sm:text-xl lg:text-lg xl:text-xl">
华炎魔方基于商业智能和模型驱动,即使是不懂编程的业务人员,也能轻松便捷地创建智能化、移动化的企业应用。
</p>
<div className="mt-5 sm:mt-8 sm:flex sm:justify-center lg:justify-start">
<div className="rounded-md shadow">
<a href="http://oss.steedos.com/apps/pdfviewer/web/viewer.html?file=http://oss.steedos.com/docs/%E5%8D%8E%E7%82%8E%E9%AD%94%E6%96%B9%E6%8A%80%E6%9C%AF%E7%99%BD%E7%9A%AE%E4%B9%A6.pdf" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-white bg-green-600 hover:bg-green-500 focus:outline-none focus:border-green-700 focus:shadow-outline-teal transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10 hover:text-white">
技术白皮书
</a>
</div>
<div className="mt-3 sm:mt-0 sm:ml-3">
<a href="/form/trial/" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-green-700 bg-green-100 hover:text-green-600 hover:bg-green-50 focus:outline-none focus:shadow-outline-teal focus:border-green-300 transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10">
预约演示
</a>
</div>
</div>
</div>
<div className="mt-12 relative sm:max-w-lg sm:mx-auto lg:mt-0 lg:max-w-none lg:mx-0 lg:col-span-6 lg:flex lg:items-center">
<div className="relative mx-auto w-full lg:max-w-md">
<div className="relative block w-full rounded-lg shadow-lg overflow-hidden focus:outline-none focus:shadow-outline">
<Video
poster="https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.jpg"
autoplay={false}
urls={[
{name:"高清", url:"https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.mp4"},
]}/>
</div>
</div>
</div>
</div>
</div>
</section>
<section className="flex bg-cover bg-no-repeat bg-gray-50">
<div className="mx-auto max-w-screen-xl px-4 sm:px-6 lg:px-8 my-16">
<div class="relative">
<div class="lg:grid lg:grid-flow-row-dense lg:grid-cols-2 lg:gap-8 lg:items-center">
<div class="lg:col-start-2">
<h3 class="text-2xl font-extrabold text-gray-900 tracking-tight sm:text-3xl">
轻松搭建、快速创新,赋能每个员工
</h3>
<p class="mt-3 text-lg text-gray-500">
</p>
<dl class="mt-10 space-y-10">
<div class="relative">
<dt>
<svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" />
</svg>
<p class="ml-9 text-lg leading-6 text-gray-700">只需点击鼠标,就能进行应用系统的编码和设计,帮助业务人员和IT部门融合在一起,在一个平台上轻松协作。</p>
</dt>
<dd class="mt-2 ml-9 text-base text-gray-500">
</dd>
</div>
<div class="relative">
<dt>
<svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" />
</svg>
<p class="ml-9 text-lg leading-6 text-gray-700">基于领先的人工智能和现代计算技术,管理每个部门的业务并实现自动化处理,加速企业数字化转型。</p>
</dt>
<dd class="mt-2 ml-9 text-base text-gray-500">
</dd>
</div>
<div class="relative">
<dt>
<svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" />
</svg>
<p class="ml-9 text-lg leading-6 text-gray-700">提供开箱即用的安全性和治理功能,您可以在受信任、完全开源的平台上自信地扩展和构建。</p>
</dt>
<dd class="mt-2 ml-9 text-base text-gray-500">
</dd>
</div>
</dl>
</div>
<div class="mt-10 -mx-4 relative lg:mt-0 lg:col-start-1">
<svg class="absolute left-1/2 transform -translate-x-1/2 translate-y-16 lg:hidden" width="784" height="404" fill="none" viewBox="0 0 784 404" aria-hidden="true">
<defs>
<pattern id="e80155a9-dfde-425a-b5ea-1f6fadd20131" x="0" y="0" width="20" height="20" patternUnits="userSpaceOnUse">
<rect x="0" y="0" width="4" height="4" class="text-gray-200" fill="currentColor"></rect>
</pattern>
</defs>
<rect width="784" height="404" fill="url(#e80155a9-dfde-425a-b5ea-1f6fadd20131)"></rect>
</svg>
<img class="relative mx-auto" width="490" src="https://images.unsplash | Landing | identifier_name |
|
cn.js | <p className="mt-3 text-base text-gray-700 sm:mt-5 sm:text-xl lg:text-lg xl:text-xl">
华炎魔方基于商业智能和模型驱动,即使是不懂编程的业务人员,也能轻松便捷地创建智能化、移动化的企业应用。
</p>
<div className="mt-5 sm:mt-8 sm:flex sm:justify-center lg:justify-start">
<div className="rounded-md shadow">
<a href="http://oss.steedos.com/apps/pdfviewer/web/viewer.html?file=http://oss.steedos.com/docs/%E5%8D%8E%E7%82%8E%E9%AD%94%E6%96%B9%E6%8A%80%E6%9C%AF%E7%99%BD%E7%9A%AE%E4%B9%A6.pdf" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-white bg-green-600 hover:bg-green-500 focus:outline-none focus:border-green-700 focus:shadow-outline-teal transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10 hover:text-white">
技术白皮书
</a>
</div>
<div className="mt-3 sm:mt-0 sm:ml-3">
<a href="/form/trial/" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-green-700 bg-green-100 hover:text-green-600 hover:bg-green-50 focus:outline-none focus:shadow-outline-teal focus:border-green-300 transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10">
预约演示
</a>
</div>
</div>
</div>
<div className="mt-12 relative sm:max-w-lg sm:mx-auto lg:mt-0 lg:max-w-none lg:mx-0 lg:col-span-6 lg:flex lg:items-center">
<div className="relative mx-auto w-full lg:max-w-md">
<div className="relative block w-full rounded-lg shadow-lg overflow-hidden focus:outline-none focus:shadow-outline">
<Video
poster="https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.jpg"
autoplay={false}
urls={[
{name:"高清", url:"https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.mp4"},
]}/>
</div>
</div>
</div>
</div>
</div>
</section>
<section className="flex bg-cover bg-no-repeat bg-gray-50">
<div className="mx-auto max-w-screen-xl px-4 sm:px-6 lg:px-8 my-16">
<div class="relative">
<div class="lg:grid lg:grid-flow-row-dense lg:grid-cols-2 lg:gap-8 lg:items-center">
<div class="lg:col-start-2">
<h3 class="text-2xl font-extrabold text-gray-900 tracking-tight sm:text-3xl">
轻松搭建、快速创新,赋能每个员工
</h3>
<p class="mt-3 text-lg text-gray-500">
</p>
<dl class="mt-10 space-y-10">
<div class="relative">
<dt>
<svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" />
</svg>
<p class="ml-9 text-lg leading-6 text-gray-700">只需点击鼠标,就能进行应用系统的编码和设计,帮助业务人员和IT部门融合在一起,在一个平台上轻松协作。</p>
</dt>
<dd class="mt-2 ml-9 text-base text-gray-500">
</dd>
</div>
<div class="relative">
<dt>
<svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" />
</svg>
<p class="ml-9 text-lg leading-6 text-gray-700">基于领先的人工智能和现代计算技术,管理每个部门的业务并实现自动化处理,加速企业数字化转型。</p>
</dt>
<dd class="mt-2 ml-9 text-base text-gray-500">
</dd>
</div>
<div class="relative">
<dt>
<svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" />
</svg>
<p class="ml-9 text-lg leading-6 text-gray-700">提供开箱即用的安全性和治理功能,您可以在受信任、完全开源的平台上自信地扩展和构建。</p>
</dt>
<dd class="mt-2 ml-9 text-base text-gray-500">
</dd>
</div>
</dl>
</div>
<div class="mt-10 -mx-4 relative lg:mt-0 lg:col-start-1">
<svg class="absolute left-1/2 transform -translate-x-1/2 translate-y-16 lg:hidden" width="784" height="404" fill="none" viewBox="0 0 784 404" aria-hidden="true">
<defs>
<pattern id="e80155a9-dfde-425a-b5ea-1f6fadd20131" x="0" y="0" width="20" height="20" patternUnits="userSpaceOnUse">
<rect x="0" y="0" width="4" height="4" class="text-gray-200" fill="currentColor"></rect>
</pattern>
</defs>
<rect width="784" height="404" fill="url(#e80155a9-dfde-425a-b5ea-1f6fadd20131)"></rect>
</svg>
<img class="relative mx-auto" width="490" src="https://images.unsplash.com/photo- | {
return (
<Layout
title="华炎魔方,华炎办公,审批王,低代码,零代码,快速开发工具,企业PaaS平台"
description="华炎魔方是一款随需应变的管理软件开发工具,旨在通过其强大的敏捷性、灵活性和开放性帮助企业创新、扩展和集成企业业务系统。基于该平台,您可以快速创建智能化、移动化的企业应用。"
keywords={["低代码,低代码开发,低代码开发平台,开源低代码开发平台,快速开发平台,快速开发工具,paas,零代码,零代码开发,零代码开发平台"]}
>
<section className="flex bg-cover bg-no-repeat bg-gray-100">
<div className="mx-auto max-w-screen-xl px-4 sm:px-6 lg:px-8 my-16">
<div className="lg:grid lg:grid-cols-12 lg:gap-8">
<div className="sm:text-center md:max-w-2xl md:mx-auto lg:col-span-6 lg:text-left">
{/* <div className="text-sm font-semibold uppercase tracking-wide text-gray-700 sm:text-base lg:text-sm xl:text-base">
新一代低代码开发平台
</div> */}
<h2 className="mt-1 text-4xl tracking-tight leading-10 font-extrabold text-gray-900 sm:leading-none sm:text-4xl lg:text-5xl xl:text-6xl">
高效搭建企业应用的
<br className="hidden md:inline"/>
<span className="text-green-700">神奇魔方</span>
</h2> | identifier_body |
|
cn.js | 不懂编程的业务人员,也能轻松便捷地创建智能化、移动化的企业应用。
</p>
<div className="mt-5 sm:mt-8 sm:flex sm:justify-center lg:justify-start">
<div className="rounded-md shadow">
<a href="http://oss.steedos.com/apps/pdfviewer/web/viewer.html?file=http://oss.steedos.com/docs/%E5%8D%8E%E7%82%8E%E9%AD%94%E6%96%B9%E6%8A%80%E6%9C%AF%E7%99%BD%E7%9A%AE%E4%B9%A6.pdf" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-white bg-green-600 hover:bg-green-500 focus:outline-none focus:border-green-700 focus:shadow-outline-teal transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10 hover:text-white">
技术白皮书
</a>
</div>
<div className="mt-3 sm:mt-0 sm:ml-3">
<a href="/form/trial/" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-green-700 bg-green-100 hover:text-green-600 hover:bg-green-50 focus:outline-none focus:shadow-outline-teal focus:border-green-300 transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10">
预约演示
</a>
</div>
</div>
</div>
<div className="mt-12 relative sm:max-w-lg sm:mx-auto lg:mt-0 lg:max-w-none lg:mx-0 lg:col-span-6 lg:flex lg:items-center">
<div className="relative mx-auto w-full lg:max-w-md">
<div className="relative block w-full rounded-lg shadow-lg overflow-hidden focus:outline-none focus:shadow-outline">
<Video
poster="https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.jpg"
autoplay={false} | </div>
</div>
</div>
</div>
</div>
</section>
<section className="flex bg-cover bg-no-repeat bg-gray-50">
<div className="mx-auto max-w-screen-xl px-4 sm:px-6 lg:px-8 my-16">
<div class="relative">
<div class="lg:grid lg:grid-flow-row-dense lg:grid-cols-2 lg:gap-8 lg:items-center">
<div class="lg:col-start-2">
<h3 class="text-2xl font-extrabold text-gray-900 tracking-tight sm:text-3xl">
轻松搭建、快速创新,赋能每个员工
</h3>
<p class="mt-3 text-lg text-gray-500">
</p>
<dl class="mt-10 space-y-10">
<div class="relative">
<dt>
<svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" />
</svg>
<p class="ml-9 text-lg leading-6 text-gray-700">只需点击鼠标,就能进行应用系统的编码和设计,帮助业务人员和IT部门融合在一起,在一个平台上轻松协作。</p>
</dt>
<dd class="mt-2 ml-9 text-base text-gray-500">
</dd>
</div>
<div class="relative">
<dt>
<svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" />
</svg>
<p class="ml-9 text-lg leading-6 text-gray-700">基于领先的人工智能和现代计算技术,管理每个部门的业务并实现自动化处理,加速企业数字化转型。</p>
</dt>
<dd class="mt-2 ml-9 text-base text-gray-500">
</dd>
</div>
<div class="relative">
<dt>
<svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" />
</svg>
<p class="ml-9 text-lg leading-6 text-gray-700">提供开箱即用的安全性和治理功能,您可以在受信任、完全开源的平台上自信地扩展和构建。</p>
</dt>
<dd class="mt-2 ml-9 text-base text-gray-500">
</dd>
</div>
</dl>
</div>
<div class="mt-10 -mx-4 relative lg:mt-0 lg:col-start-1">
<svg class="absolute left-1/2 transform -translate-x-1/2 translate-y-16 lg:hidden" width="784" height="404" fill="none" viewBox="0 0 784 404" aria-hidden="true">
<defs>
<pattern id="e80155a9-dfde-425a-b5ea-1f6fadd20131" x="0" y="0" width="20" height="20" patternUnits="userSpaceOnUse">
<rect x="0" y="0" width="4" height="4" class="text-gray-200" fill="currentColor"></rect>
</pattern>
</defs>
<rect width="784" height="404" fill="url(#e80155a9-dfde-425a-b5ea-1f6fadd20131)"></rect>
</svg>
<img class="relative mx-auto" width="490" src="https://images.unsplash.com/photo-1520333789090-1afc82db536a?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=2102&q=80" alt=""/>
</div>
</div>
</div>
</div>
</section>
<div class="relative bg-gray-50 pt-16 pb-20 px-4 sm:px-6 lg:pt-24 lg:pb-28 lg:px-8">
<div class="absolute inset-0">
<div class="bg-white h-1/3 sm:h-2/3"></div>
</div>
<div class="relative max-w-7xl mx-auto">
<div class="text-center">
<h2 class="text-3xl tracking-tight font-extrabold text-gray-900 sm:text-4xl">
企业级低代码的核心特点
</h2>
<p class="mt-3 max-w-2xl mx-auto text-xl text-gray-500 sm:mt-4">
助力企业数字化转型
</p>
</div>
<div class="mt-12 max-w-lg mx-auto grid gap-5 lg:grid-cols-3 lg:max-w-none">
<div class="flex flex-col rounded-lg shadow-lg overflow-hidden">
<div class="flex-shrink-0">
<img class="h-48 w-full object-cover" src="https://images.unsplash.com/photo-1496128858413-b36217c2ce36?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format& | urls={[
{name:"高清", url:"https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.mp4"},
]}/> | random_line_split |
model_transformer.py | :
return [match_layer.layer]
# If 2 different layers point to the same input, or if a layer uses the
# same input multiple times, the input layer can be repeated. But it
# preserves a bit of structure.
leaf_layers = []
for inp in match_layer.input_layers:
leaf_layers.extend(self._get_leaf_layers(inp))
return leaf_layers
def _get_layer_names(self, layer_node):
result = [layer_node.layer['config']['name']]
for input_layer in layer_node.input_layers:
result.extend(self._get_layer_names(input_layer))
return result
def _remove_layers(self, layers_to_remove, layers_to_remove_names):
# Remove layers.
for layer_to_remove in layers_to_remove:
self._config['layers'].remove(layer_to_remove)
# Remove entry from weight and metadata maps,
# now that layer has been removed.
for layer_name in layers_to_remove_names:
self._layer_weights_map.pop(layer_name, None)
self._layer_names_and_weights_map.pop(layer_name, None)
self._layer_metadata_map.pop(layer_name, None)
def _replace(self, match_layer_node, replacement_layer_node):
"""Replace the tree or chain of match_layer_node with replacement_layer_node."""
if self._is_functional_model(self.model):
self._replace_functional(match_layer_node, replacement_layer_node)
else:
self._replace_sequential(match_layer_node, replacement_layer_node)
def _replace_functional(self, match_layer_node, replacement_layer_node):
"""Functional model: replace the tree of match_layer_node with replacement_layer_node."""
# 1. Point all consumers of the head of the matching sub-tree to the head
# replacement layer.
#
# There are some assumptions baked in. The head layer only has 1 inbound and
# outbound node. The resulting number and shape of tensors from the
# replaced layer should equal the original layer.
consuming_layers = self._get_consuming_layers(match_layer_node.layer)
match_name = match_layer_node.layer['config']['name']
replacement_name = replacement_layer_node.layer['config']['name']
def _replace_layer_name_for_connection_info(connection_info, match_name,
replacement_name):
if connection_info[0] == match_name:
connection_info[0] = replacement_name
for key in connection_info[3]:
if isinstance(connection_info[3][key], list):
if connection_info[3][key][0] == match_name:
connection_info[3][key][0] = replacement_name
for consumer in consuming_layers:
for inbound_node in self._inbound_node_generator(consumer):
if isinstance(inbound_node, dict):
inbound_node = inbound_node.values()
for connection_info in inbound_node:
_replace_layer_name_for_connection_info(connection_info, match_name,
replacement_name)
output_consumers = self._get_output_consumers(match_layer_node.layer)
for output_consumer in output_consumers:
output_consumer[0] = replacement_layer_node.layer['config']['name']
# 2. Create inbound nodes for the replacement layers. This connects all
# the replacement layers.
def _assign_inbounds_for_replacement(layer_node):
"""_assign_inbounds_for_replacement."""
if not layer_node.input_layers:
return
layer_node.layer['inbound_nodes'] = [[]]
for input_layer in layer_node.input_layers:
# inbound_nodes can be specific tensors from multiple inbound
# connections. We make the following assumptions.
# - Only 1 inbound node for each replacement layer.
# - Only 1 tensor output from the previous layer which is connected.
# - call() method during construction does not have any args.
# These are reasonable assumptions for almost all case we are
# interested in.
layer_node.layer['inbound_nodes'][0].append(
[input_layer.layer['config']['name'], 0, 0, {}])
_assign_inbounds_for_replacement(input_layer)
_assign_inbounds_for_replacement(replacement_layer_node)
# 3. Connect the leaves of the replacement_layers to the inbound_nodes of
# the leaves in the original layer.
original_leaf_layers = self._get_leaf_layers(match_layer_node)
original_inbound_nodes = [
layer['inbound_nodes'] for layer in original_leaf_layers
]
replacement_leaf_layers = self._get_leaf_layers(replacement_layer_node)
# The original pattern and the replacement pattern can potentially have
# different number of leaf nodes and differences in how they consume these
# input layers. Matching them will require sophisticated hackery to recreate
# the new layers with the original input structure.
# Given our existing transforms, we can assume they match.
if len(original_leaf_layers) != len(replacement_leaf_layers):
raise RuntimeError('Different size of leaf layers not supported yet.')
for original_inbound_nodes, replacement_leaf_layer in zip(
original_inbound_nodes, replacement_leaf_layers):
replacement_leaf_layer['inbound_nodes'] = original_inbound_nodes
# 4. Remove the original matched layers
layers_to_remove_names = self._get_layer_names(match_layer_node)
layers_to_remove = self._get_layers(layers_to_remove_names)
self._remove_layers(layers_to_remove, layers_to_remove_names)
# 5. Add in the new layers.
def _add_replacement_layer(layer_node):
"""Recursively add new layers."""
self._config['layers'].append(layer_node.layer)
layer_name = layer_node.layer['config']['name']
# TODO(b/184603494): Remove weight map structure from model_transformer.
if layer_node.weights:
self._layer_weights_map[layer_name] = layer_node.weights
if layer_node.names_and_weights:
self._layer_names_and_weights_map[
layer_name] = layer_node.names_and_weights
if layer_node.metadata:
self._layer_metadata_map[layer_name] = layer_node.metadata
if self.candidate_layers:
self.candidate_layers.add(layer_name)
for input_layer in layer_node.input_layers:
_add_replacement_layer(input_layer)
_add_replacement_layer(replacement_layer_node)
def _replace_sequential(self, match_layer_node, replacement_layer_node):
"""Sequential model: replace the chain of match_layer_node with replacement_layer_node."""
# 1. Remove the original matched layers.
layers_to_remove_names = self._get_layer_names(match_layer_node)
layers_to_remove = self._get_layers(layers_to_remove_names)
# These variables are needed when adding the new layers
# and must be set before _remove_layers removes them.
first_layer_removed = layers_to_remove[0]
first_layer_removed_index = self._config['layers'].index(
first_layer_removed)
self._remove_layers(layers_to_remove, layers_to_remove_names)
# 2. Add in the new layers.
def _get_replacement_nodes(replacement_node):
"""Get list of replacement nodes in Sequential order."""
replacement_nodes = []
for input_layer in replacement_node.input_layers:
replacement_nodes.extend(_get_replacement_nodes(input_layer))
replacement_nodes.append(replacement_node)
return replacement_nodes
def _add_replacement_nodes(first_layer_removed_index, replacement_nodes):
"""Add replacement nodes to Sequential model."""
# Potentially insert nodes into middle of model.
i = first_layer_removed_index
for replacement_node in replacement_nodes:
self._config['layers'].insert(i, replacement_node.layer)
layer_name = replacement_node.layer['config']['name']
if replacement_node.weights:
self._layer_weights_map[layer_name] = replacement_node.weights
if replacement_node.names_and_weights:
self._layer_names_and_weights_map[
layer_name] = replacement_node.names_and_weights
if replacement_node.metadata:
self._layer_metadata_map[layer_name] = replacement_node.metadata
if self.candidate_layers:
self.candidate_layers.add(layer_name)
i += 1
replacement_nodes = _get_replacement_nodes(replacement_layer_node)
_add_replacement_nodes(first_layer_removed_index, replacement_nodes)
def _weight_name(self, name):
"""Extracts the weight name by removing layer from TF variable name.
For example, returns 'kernel:0' for 'dense_2/kernel:0'.
Args:
name: TensorFlow variable name.
Returns:
Extracted weight name.
"""
return name.split('/')[-1]
def _get_keras_layer_weights(self, keras_layer):
"""Returns a map of weight name, weight matrix. Keeps keras ordering."""
weights_map = collections.OrderedDict()
for weight_tensor, weight_numpy in \
zip(keras_layer.weights, keras_layer.get_weights()):
weights_map[self._weight_name(weight_tensor.name)] = weight_numpy
if len(weights_map) != len(keras_layer.weights):
# The case that variable identifier is not unique. It's a fallback that
# uses weight list instead of the weights map.
return None
return weights_map
def _get_keras_layer_names_and_weights(self, keras_layer):
return zip([weight.name for weight in keras_layer.weights],
keras_layer.get_weights())
def _set_layer_weights(self, layer, weights_map):
"""Sets the values of weights in a Keras layer."""
weight_value_tuples = []
for weight_tensor in layer.weights:
weight_name = self._weight_name(weight_tensor.name)
if weight_name in weights_map: | weight_value_tuples.append((weight_tensor, weights_map[weight_name]))
K.batch_set_value(weight_value_tuples) | random_line_split |
|
model_transformer.py | match_layer(layer, pattern):
return None
if self._is_functional_model(
self.model) and not self._is_match_supported(layer, is_head_node):
return None
if len(pattern.inputs) == 0:
# Leaf layer in pattern.
return LayerNode(
layer, self._get_layer_weights(layer['config']['name']), [],
self._get_layer_metadata(layer['config']['name']),
self._get_layer_names_and_weights(layer['config']['name']))
# There is a possible edge case where a single layer may output multiple
# tensors and multiple tensors from that layer may be used by the
# connection. Ignoring those for now.
input_layer_names = self._get_input_layer_names(layer)
input_layers = self._get_layers(input_layer_names)
if len(input_layers) != len(pattern.inputs):
# Number of inputs this layer takes is different from the number of
# inputs in the pattern.
#
# This path currently has the limitation that it requires an exact number
# of inputs to match a pattern. For example, if a user wants to match
# 2 Convs -> Concat and 3 Convs -> Concat, they would need to write
# 2 different patterns.
return None
# Inbound layers can have different order from the list of input patterns.
# TODO(pulkitb): Fix by checking all permutations.
input_match_layer_nodes = []
for input_layer, pattern_ in zip(input_layers, pattern.inputs):
match_layer_node = self._match_layer_with_inputs(
input_layer, pattern_, is_head_node=False)
if not match_layer_node:
return None
input_match_layer_nodes.append(match_layer_node)
return LayerNode(layer, self._get_layer_weights(layer['config']['name']),
input_match_layer_nodes,
self._get_layer_metadata(layer['config']['name']),
self._get_layer_names_and_weights(layer['config']['name']))
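  # Illustrative note (not part of the original source): the tree returned by
  # _match_layer_with_inputs above mirrors the pattern. For a hypothetical
  # pattern of a Conv2D feeding a BatchNormalization, the result is roughly
  #   LayerNode(batchnorm_config, <batchnorm weights>,
  #             [LayerNode(conv_config, <conv weights>, [])], ...)
  # i.e. the head node holds one input node per matched input branch, and leaf
  # nodes have an empty input list. The names above are placeholders only.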
def _find_pattern(self, pattern, matched_layers=None):
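    """Return a match tree for the first layer matching `pattern`.

    Layers whose names appear in `matched_layers` are skipped. Returns None
    when no layer in the config matches the pattern.
    """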
for layer in self._config['layers']:
if matched_layers and layer['config']['name'] in matched_layers:
continue
match_layer = self._match_layer_with_inputs(
layer, pattern, is_head_node=True)
if match_layer:
return match_layer
return None
def _get_leaf_layers(self, match_layer):
"""Return leaf layers from this sub-graph tree."""
if not match_layer.input_layers:
return [match_layer.layer]
    # If 2 different layers point to the same input, or if a layer uses the
    # same input multiple times, the input layer can be repeated; repeating it
    # preserves a bit of the original structure.
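    # For example, if two matched branches read the same input layer, or a
    # single layer consumes it twice (e.g. an Add applied to [x, x]), that
    # layer appears once per use in the returned list.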
leaf_layers = []
for inp in match_layer.input_layers:
leaf_layers.extend(self._get_leaf_layers(inp))
return leaf_layers
def _get_layer_names(self, layer_node):
result = [layer_node.layer['config']['name']]
for input_layer in layer_node.input_layers:
result.extend(self._get_layer_names(input_layer))
return result
def _remove_layers(self, layers_to_remove, layers_to_remove_names):
# Remove layers.
for layer_to_remove in layers_to_remove:
self._config['layers'].remove(layer_to_remove)
# Remove entry from weight and metadata maps,
# now that layer has been removed.
for layer_name in layers_to_remove_names:
self._layer_weights_map.pop(layer_name, None)
self._layer_names_and_weights_map.pop(layer_name, None)
self._layer_metadata_map.pop(layer_name, None)
def _replace(self, match_layer_node, replacement_layer_node):
"""Replace the tree or chain of match_layer_node with replacement_layer_node."""
if self._is_functional_model(self.model):
self._replace_functional(match_layer_node, replacement_layer_node)
else:
self._replace_sequential(match_layer_node, replacement_layer_node)
def _replace_functional(self, match_layer_node, replacement_layer_node):
"""Functional model: replace the tree of match_layer_node with replacement_layer_node."""
# 1. Point all consumers of the head of the matching sub-tree to the head
# replacement layer.
#
    # There are some assumptions baked in. The head layer only has 1 inbound
    # and outbound node, and the number and shape of tensors produced by the
    # replacement layer should equal those of the original layer.
consuming_layers = self._get_consuming_layers(match_layer_node.layer)
match_name = match_layer_node.layer['config']['name']
replacement_name = replacement_layer_node.layer['config']['name']
def _replace_layer_name_for_connection_info(connection_info, match_name,
replacement_name):
if connection_info[0] == match_name:
connection_info[0] = replacement_name
for key in connection_info[3]:
if isinstance(connection_info[3][key], list):
if connection_info[3][key][0] == match_name:
connection_info[3][key][0] = replacement_name
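    # Note: each connection_info entry follows the Keras functional-config
    # layout [layer_name, node_index, tensor_index, kwargs], so renaming only
    # touches index 0 and any kwarg value that is itself such a reference.
    # For example, ['conv2d', 0, 0, {}] would become
    # ['replacement_conv2d', 0, 0, {}] (names here are purely illustrative).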
for consumer in consuming_layers:
for inbound_node in self._inbound_node_generator(consumer):
if isinstance(inbound_node, dict):
inbound_node = inbound_node.values()
for connection_info in inbound_node:
_replace_layer_name_for_connection_info(connection_info, match_name,
replacement_name)
output_consumers = self._get_output_consumers(match_layer_node.layer)
for output_consumer in output_consumers:
output_consumer[0] = replacement_layer_node.layer['config']['name']
# 2. Create inbound nodes for the replacement layers. This connects all
# the replacement layers.
def _assign_inbounds_for_replacement(layer_node):
"""_assign_inbounds_for_replacement."""
if not layer_node.input_layers:
return
layer_node.layer['inbound_nodes'] = [[]]
for input_layer in layer_node.input_layers:
# inbound_nodes can be specific tensors from multiple inbound
# connections. We make the following assumptions.
# - Only 1 inbound node for each replacement layer.
# - Only 1 tensor output from the previous layer which is connected.
# - call() method during construction does not have any args.
# These are reasonable assumptions for almost all cases we are
# interested in.
layer_node.layer['inbound_nodes'][0].append(
[input_layer.layer['config']['name'], 0, 0, {}])
_assign_inbounds_for_replacement(input_layer)
_assign_inbounds_for_replacement(replacement_layer_node)
# 3. Connect the leaves of the replacement_layers to the inbound_nodes of
# the leaves in the original layer.
original_leaf_layers = self._get_leaf_layers(match_layer_node)
original_inbound_nodes = [
layer['inbound_nodes'] for layer in original_leaf_layers
]
replacement_leaf_layers = self._get_leaf_layers(replacement_layer_node)
# The original pattern and the replacement pattern can potentially have
# different number of leaf nodes and differences in how they consume these
# input layers. Matching them will require sophisticated hackery to recreate
# the new layers with the original input structure.
# Given our existing transforms, we can assume they match.
if len(original_leaf_layers) != len(replacement_leaf_layers):
raise RuntimeError('Different size of leaf layers not supported yet.')
for original_inbound_nodes, replacement_leaf_layer in zip(
original_inbound_nodes, replacement_leaf_layers):
replacement_leaf_layer['inbound_nodes'] = original_inbound_nodes
# 4. Remove the original matched layers
layers_to_remove_names = self._get_layer_names(match_layer_node)
layers_to_remove = self._get_layers(layers_to_remove_names)
self._remove_layers(layers_to_remove, layers_to_remove_names)
# 5. Add in the new layers.
def _add_replacement_layer(layer_node):
"""Recursively add new layers."""
self._config['layers'].append(layer_node.layer)
layer_name = layer_node.layer['config']['name']
# TODO(b/184603494): Remove weight map structure from model_transformer.
if layer_node.weights:
self._layer_weights_map[layer_name] = layer_node.weights
if layer_node.names_and_weights:
self._layer_names_and_weights_map[
layer_name] = layer_node.names_and_weights
if layer_node.metadata:
self._layer_metadata_map[layer_name] = layer_node.metadata
if self.candidate_layers:
self.candidate_layers.add(layer_name)
for input_layer in layer_node.input_layers:
_add_replacement_layer(input_layer)
_add_replacement_layer(replacement_layer_node)
def _replace_sequential(self, match_layer_node, replacement_layer_node):
"""Sequential model: replace the chain of match_layer_node with replacement_layer_node."""
# 1. Remove the original matched layers.
layers_to_remove_names = self._get_layer_names(match_layer_node)
layers_to_remove = self._get_layers(layers_to_remove_names)
# These variables are needed when adding the new layers
# and must be set before _remove_layers removes them.
first_layer_removed = layers_to_remove[0]
first_layer_removed_index = self._config['layers'].index(
first_layer_removed)
self._remove_layers(layers_to_remove, layers_to_remove_names)
# 2. Add in the new layers.
def _get_replacement_nodes(replacement_node):
"""Get list of replacement nodes in Sequential order."""
replacement_nodes = []
for input_layer in replacement_node.input_layers:
| replacement_nodes.extend(_get_replacement_nodes(input_layer)) | conditional_block |
|
model_transformer.py | (layer_name, {})
def _get_layer_metadata(self, layer_name):
return self._layer_metadata_map.get(layer_name, {})
def _match_pattern(self, target, pattern):
return re.match('^' + pattern + '$', target) is not None
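# Illustrative behaviour (example values, not exhaustive): the anchors force
# the pattern to cover the whole target, so
#   _match_pattern('Conv2D', 'Conv2D')              -> True
#   _match_pattern('Conv2DTranspose', 'Conv2D')     -> False
#   _match_pattern('Conv2DTranspose', '.*Conv2D.*') -> True (regex patterns work)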
def _match_layer(self, layer, pattern):
"""Check if specific layer matches the pattern."""
if self.candidate_layers and \
layer['config']['name'] not in self.candidate_layers:
return False
if not self._match_pattern(layer['class_name'], pattern.class_name):
return False
layer_config = layer['config']
for key, value in pattern.config.items():
# Either the provided value should equal the config value, or
# be a regex match to str(value).
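# e.g. a pattern config of {'activation': 'relu|linear'} accepts layers whose
# config has activation 'relu' or 'linear' (illustrative keys and values).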
if not (self._match_pattern(str(layer_config.get(key)), str(value)) or \
layer_config.get(key) == value):
return False
return True
def _is_match_supported(self, layer, is_head_node):
"""Check if ModelTransformer supports transformations given number of inputs and outputs at a layer.
Args:
layer: layer for pattern matching. Must come from a Functional model.
is_head_node: whether this is the head node (e.g. in A -> B , B is the
head node).
Returns:
whether match is supported.
"""
inbound_nodes = layer['inbound_nodes']
if len(inbound_nodes) > 1:
# `layer` is re-used for more than 1 connection from previous layers. If
# a pattern matches one set of inputs and is replaced, it will break the
# other connection.
#
# Note that theoretically it's possible for multiple connections to have
# exactly the same pattern, and in that case the transform might be
# applied. But that's a very complicated edge case not worth handling.
return False
# If a layer has multiple inbound nodes, it will produce multiple outbound
# connections as well. Hence no need to explicitly check that.
consuming_layers = self._get_consuming_layers(layer)
output_consumers = self._get_output_consumers(layer)
if len(consuming_layers) + len(output_consumers) > 1:
# Even if a layer has only 1 incoming connection, multiple layers may
# still consume the output. Having multiple consumers is only supported
# for the head node, and not intermediate layers. Replacing intermediate
# nodes with >1 consumer will lead to dangling nodes.
#
# Note that theoretically, intermediate layers could be supported as part
# of a general layer transform tool. This is not supported given there is
# no motivating use case.
if not is_head_node:
return False
return True
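# Example of the constraint above (hypothetical pattern): for a match of
# Conv2D -> BatchNormalization -> ReLU, the ReLU head node may feed multiple
# consumers, but the matched Conv2D and BatchNormalization layers must each
# have exactly one consumer for the match to be supported.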
def _get_input_layer_names(self, layer):
|
def _match_layer_with_inputs(self, layer, pattern, is_head_node):
"""Match pattern at this layer, and continue to match at its inputs."""
if not self._match_layer(layer, pattern):
return None
if self._is_functional_model(
self.model) and not self._is_match_supported(layer, is_head_node):
return None
if len(pattern.inputs) == 0:
# Leaf layer in pattern.
return LayerNode(
layer, self._get_layer_weights(layer['config']['name']), [],
self._get_layer_metadata(layer['config']['name']),
self._get_layer_names_and_weights(layer['config']['name']))
# There is a possible edge case where a single layer may output multiple
# tensors and multiple tensors from that layer may be used by the
# connection. Ignoring those for now.
input_layer_names = self._get_input_layer_names(layer)
input_layers = self._get_layers(input_layer_names)
if len(input_layers) != len(pattern.inputs):
# Number of inputs this layer takes is different from the number of
# inputs in the pattern.
#
# This path currently has the limitation that it requires an exact number
# of inputs to match a pattern. For example, if a user wants to match
# 2 Convs -> Concat and 3 Convs -> Concat, they would need to write
# 2 different patterns.
return None
# Inbound layers can have different order from the list of input patterns.
# TODO(pulkitb): Fix by checking all permutations.
input_match_layer_nodes = []
for input_layer, pattern_ in zip(input_layers, pattern.inputs):
match_layer_node = self._match_layer_with_inputs(
input_layer, pattern_, is_head_node=False)
if not match_layer_node:
return None
input_match_layer_nodes.append(match_layer_node)
return LayerNode(layer, self._get_layer_weights(layer['config']['name']),
input_match_layer_nodes,
self._get_layer_metadata(layer['config']['name']),
self._get_layer_names_and_weights(layer['config']['name']))
def _find_pattern(self, pattern, matched_layers=None):
for layer in self._config['layers']:
if matched_layers and layer['config']['name'] in matched_layers:
continue
match_layer = self._match_layer_with_inputs(
layer, pattern, is_head_node=True)
if match_layer:
return match_layer
return None
def _get_leaf_layers(self, match_layer):
"""Return leaf layers from this sub-graph tree."""
if not match_layer.input_layers:
return [match_layer.layer]
# If 2 different layers point to the same input, or if a layer uses the
# same input multiple times, the input layer can be repeated. But it
# preserves a bit of structure.
leaf_layers = []
for inp in match_layer.input_layers:
leaf_layers.extend(self._get_leaf_layers(inp))
return leaf_layers
def _get_layer_names(self, layer_node):
result = [layer_node.layer['config']['name']]
for input_layer in layer_node.input_layers:
result.extend(self._get_layer_names(input_layer))
return result
def _remove_layers(self, layers_to_remove, layers_to_remove_names):
# Remove layers.
for layer_to_remove in layers_to_remove:
self._config['layers'].remove(layer_to_remove)
# Remove entry from weight and metadata maps,
# now that layer has been removed.
for layer_name in layers_to_remove_names:
self._layer_weights_map.pop(layer_name, None)
self._layer_names_and_weights_map.pop(layer_name, None)
self._layer_metadata_map.pop(layer_name, None)
def _replace(self, match_layer_node, replacement_layer_node):
"""Replace the tree or chain of match_layer_node with replacement_layer_node."""
if self._is_functional_model(self.model):
self._replace_functional(match_layer_node, replacement_layer_node)
else:
self._replace_sequential(match_layer_node, replacement_layer_node)
def _replace_functional(self, match_layer_node, replacement_layer_node):
"""Functional model: replace the tree of match_layer_node with replacement_layer_node."""
# 1. Point all consumers of the head of the matching sub-tree to the head
# replacement layer.
#
# There are some assumptions baked in. The head layer only has 1 inbound and
# outbound node. The resulting number and shape of tensors from the
# replaced layer should equal the original layer.
consuming_layers = self._get_consuming_layers(match_layer_node.layer)
match_name = match_layer_node.layer['config']['name']
replacement_name = replacement_layer_node.layer['config']['name']
def _replace_layer_name_for_connection_info(connection_info, match_name,
replacement_name):
if connection_info[0] == match_name:
connection_info[0] = replacement_name
for key in connection_info[3]:
if isinstance(connection_info[3][key], list):
if connection_info[3][key][0] == match_name:
connection_info[3][key][0] = replacement_name
for consumer in consuming_layers:
for inbound_node in self._inbound_node_generator(consumer):
if isinstance(inbound_node, dict):
inbound_node = inbound_node.values()
for connection_info in inbound_node:
_replace_layer_name_for_connection_info(connection_info, match_name,
replacement_name)
output_consumers = self._get_output_consumers(match_layer_node.layer)
for output_consumer in output_consumers:
output_consumer[0] = replacement_layer_node.layer['config']['name']
# 2. Create inbound nodes for the replacement layers. This connects all
# the replacement layers.
def _assign_inbounds_for_replacement(layer_node):
"""_assign_inbounds_for_replacement."""
if not layer_node.input_layers:
return
layer_node.layer['inbound_nodes'] = [[]]
for input_layer in layer_node.input_layers:
# inbound_nodes can be specific tensors from multiple inbound
# connections. We make the following assumptions.
# - Only 1 inbound node for each replacement layer.
# - Only 1 tensor output from the | """Get the names of a layer's input layers."""
if self._is_functional_model(self.model):
inbound_nodes = layer['inbound_nodes']
return [connection_info[0] for connection_info in inbound_nodes[0]]
else: # Sequential model.
layers = self._config['layers']
i = layers.index(layer)
if i == 0:
# First layer has no inputs.
return []
else:
return [layers[i - 1]['config']['name']] | identifier_body |
model_transformer.py | and layer['config']['name'] in matched_layers:
continue
match_layer = self._match_layer_with_inputs(
layer, pattern, is_head_node=True)
if match_layer:
return match_layer
return None
def _get_leaf_layers(self, match_layer):
"""Return leaf layers from this sub-graph tree."""
if not match_layer.input_layers:
return [match_layer.layer]
# If 2 different layers point to the same input, or if a layer uses the
# same input multiple times, the input layer can be repeated. But it
# preserves a bit of structure.
leaf_layers = []
for inp in match_layer.input_layers:
leaf_layers.extend(self._get_leaf_layers(inp))
return leaf_layers
def _get_layer_names(self, layer_node):
result = [layer_node.layer['config']['name']]
for input_layer in layer_node.input_layers:
result.extend(self._get_layer_names(input_layer))
return result
def _remove_layers(self, layers_to_remove, layers_to_remove_names):
# Remove layers.
for layer_to_remove in layers_to_remove:
self._config['layers'].remove(layer_to_remove)
# Remove entry from weight and metadata maps,
# now that layer has been removed.
for layer_name in layers_to_remove_names:
self._layer_weights_map.pop(layer_name, None)
self._layer_names_and_weights_map.pop(layer_name, None)
self._layer_metadata_map.pop(layer_name, None)
def _replace(self, match_layer_node, replacement_layer_node):
"""Replace the tree or chain of match_layer_node with replacement_layer_node."""
if self._is_functional_model(self.model):
self._replace_functional(match_layer_node, replacement_layer_node)
else:
self._replace_sequential(match_layer_node, replacement_layer_node)
def _replace_functional(self, match_layer_node, replacement_layer_node):
"""Functional model: replace the tree of match_layer_node with replacement_layer_node."""
# 1. Point all consumers of the head of the matching sub-tree to the head
# replacement layer.
#
# There are some assumptions baked in. The head layer only has 1 inbound and
# outbound node. The resulting number and shape of tensors from the
# replaced layer should equal the original layer.
consuming_layers = self._get_consuming_layers(match_layer_node.layer)
match_name = match_layer_node.layer['config']['name']
replacement_name = replacement_layer_node.layer['config']['name']
def _replace_layer_name_for_connection_info(connection_info, match_name,
replacement_name):
if connection_info[0] == match_name:
connection_info[0] = replacement_name
for key in connection_info[3]:
if isinstance(connection_info[3][key], list):
if connection_info[3][key][0] == match_name:
connection_info[3][key][0] = replacement_name
for consumer in consuming_layers:
for inbound_node in self._inbound_node_generator(consumer):
if isinstance(inbound_node, dict):
inbound_node = inbound_node.values()
for connection_info in inbound_node:
_replace_layer_name_for_connection_info(connection_info, match_name,
replacement_name)
output_consumers = self._get_output_consumers(match_layer_node.layer)
for output_consumer in output_consumers:
output_consumer[0] = replacement_layer_node.layer['config']['name']
# 2. Create inbound nodes for the replacement layers. This connects all
# the replacement layers.
def _assign_inbounds_for_replacement(layer_node):
"""_assign_inbounds_for_replacement."""
if not layer_node.input_layers:
return
layer_node.layer['inbound_nodes'] = [[]]
for input_layer in layer_node.input_layers:
# inbound_nodes can be specific tensors from multiple inbound
# connections. We make the following assumptions.
# - Only 1 inbound node for each replacement layer.
# - Only 1 tensor output from the previous layer which is connected.
# - call() method during construction does not have any args.
# These are reasonable assumptions for almost all cases we are
# interested in.
layer_node.layer['inbound_nodes'][0].append(
[input_layer.layer['config']['name'], 0, 0, {}])
_assign_inbounds_for_replacement(input_layer)
_assign_inbounds_for_replacement(replacement_layer_node)
# 3. Connect the leaves of the replacement_layers to the inbound_nodes of
# the leaves in the original layer.
original_leaf_layers = self._get_leaf_layers(match_layer_node)
original_inbound_nodes = [
layer['inbound_nodes'] for layer in original_leaf_layers
]
replacement_leaf_layers = self._get_leaf_layers(replacement_layer_node)
# The original pattern and the replacement pattern can potentially have
# different number of leaf nodes and differences in how they consume these
# input layers. Matching them will require sophisticated hackery to recreate
# the new layers with the original input structure.
# Given our existing transforms, we can assume they match.
if len(original_leaf_layers) != len(replacement_leaf_layers):
raise RuntimeError('Different size of leaf layers not supported yet.')
for original_inbound_nodes, replacement_leaf_layer in zip(
original_inbound_nodes, replacement_leaf_layers):
replacement_leaf_layer['inbound_nodes'] = original_inbound_nodes
# 4. Remove the original matched layers
layers_to_remove_names = self._get_layer_names(match_layer_node)
layers_to_remove = self._get_layers(layers_to_remove_names)
self._remove_layers(layers_to_remove, layers_to_remove_names)
# 5. Add in the new layers.
def _add_replacement_layer(layer_node):
"""Recursively add new layers."""
self._config['layers'].append(layer_node.layer)
layer_name = layer_node.layer['config']['name']
# TODO(b/184603494): Remove weight map structure from model_transformer.
if layer_node.weights:
self._layer_weights_map[layer_name] = layer_node.weights
if layer_node.names_and_weights:
self._layer_names_and_weights_map[
layer_name] = layer_node.names_and_weights
if layer_node.metadata:
self._layer_metadata_map[layer_name] = layer_node.metadata
if self.candidate_layers:
self.candidate_layers.add(layer_name)
for input_layer in layer_node.input_layers:
_add_replacement_layer(input_layer)
_add_replacement_layer(replacement_layer_node)
def _replace_sequential(self, match_layer_node, replacement_layer_node):
"""Sequential model: replace the chain of match_layer_node with replacement_layer_node."""
# 1. Remove the original matched layers.
layers_to_remove_names = self._get_layer_names(match_layer_node)
layers_to_remove = self._get_layers(layers_to_remove_names)
# These variables are needed when adding the new layers
# and must be set before _remove_layers removes them.
first_layer_removed = layers_to_remove[0]
first_layer_removed_index = self._config['layers'].index(
first_layer_removed)
self._remove_layers(layers_to_remove, layers_to_remove_names)
# 2. Add in the new layers.
def _get_replacement_nodes(replacement_node):
"""Get list of replacement nodes in Sequential order."""
replacement_nodes = []
for input_layer in replacement_node.input_layers:
replacement_nodes.extend(_get_replacement_nodes(input_layer))
replacement_nodes.append(replacement_node)
return replacement_nodes
def _add_replacement_nodes(first_layer_removed_index, replacement_nodes):
"""Add replacement nodes to Sequential model."""
# Potentially insert nodes into middle of model.
i = first_layer_removed_index
for replacement_node in replacement_nodes:
self._config['layers'].insert(i, replacement_node.layer)
layer_name = replacement_node.layer['config']['name']
if replacement_node.weights:
self._layer_weights_map[layer_name] = replacement_node.weights
if replacement_node.names_and_weights:
self._layer_names_and_weights_map[
layer_name] = replacement_node.names_and_weights
if replacement_node.metadata:
self._layer_metadata_map[layer_name] = replacement_node.metadata
if self.candidate_layers:
self.candidate_layers.add(layer_name)
i += 1
replacement_nodes = _get_replacement_nodes(replacement_layer_node)
_add_replacement_nodes(first_layer_removed_index, replacement_nodes)
def _weight_name(self, name):
"""Extracts the weight name by removing layer from TF variable name.
For example, returns 'kernel:0' for 'dense_2/kernel:0'.
Args:
name: TensorFlow variable name.
Returns:
Extracted weight name.
"""
return name.split('/')[-1]
def _get_keras_layer_weights(self, keras_layer):
"""Returns a map of weight name, weight matrix. Keeps keras ordering."""
weights_map = collections.OrderedDict()
for weight_tensor, weight_numpy in \
zip(keras_layer.weights, keras_layer.get_weights()):
weights_map[self._weight_name(weight_tensor.name)] = weight_numpy
if len(weights_map) != len(keras_layer.weights):
# The variable identifiers are not unique in this case. Return None as a
# fallback so callers use the weight list instead of the weights map.
return None
return weights_map
def _get_keras_layer_names_and_weights(self, keras_layer):
return zip([weight.name for weight in keras_layer.weights],
keras_layer.get_weights())
def | _set_layer_weights | identifier_name |
|
generate_samples.py | = True
# choose objects from the list; make visible, move and rotate
objects_order = list(range(len(objects)))
random.shuffle(objects_order)
n_objects = random.randint(1,len(objects)-1)
obj_center = np.asarray([0,0], 'float32')
for i in range(n_objects):
obj = objects[objects_order[i]]
## unhide
obj.hide_render= False
for child in getChildren([obj]):
child.hide_render = False
## random rotation
rotation_min = obj['rotation_range'][0]
rotation_range = obj['rotation_range'][1] - rotation_min
rotation_angle = rotation_min + rotation_range*random.random()
obj.rotation_euler[2] = radians(rotation_angle)
## position
pos = object_positions[i]
obj.location[0] = pos[0]
obj.location[1] = pos[1]
obj_center+=pos
# center_obj = objects[objects_order[0]]
obj_center/=n_objects
# Ground Texture to background for more realistic reflections
bpy.data.images["ground.jpg"].filepath = bg_img_path
return obj_center, bg_size
def cyclic_arangement(objects, camera, cam_dist, step, step_count, img_list):
## hides, reveals and rotates the objects and moves the camera so every object is visible for the same number of images from a diverse range of viewpoints
MAX_CAM_STEPS = 20 ## the max number of camera steps to go from the lowest to the highest position before starting again; the lowest and highest positions are defined by cam_pos_range
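# Worked example with illustrative numbers: with step_count=100 and 4 objects,
# steps_per_obj is 25, so steps 0-24 render object 0, steps 25-49 object 1,
# and so on; the camera then cycles through min(25, MAX_CAM_STEPS)=20 heights.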
BACKGROUND_REFLECTIONS = True
# Image texture onto background for more realistic reflections
if BACKGROUND_REFLECTIONS:
bg_img_path = img_list[step % len(img_list)-1] # unrandomized
bpy.data.images["ground.jpg"].filepath = bg_img_path
# hide all objects
for obj in objects:
obj.hide_render = True
# obj.hide = True
for child in getChildren(objects):
child.hide_render = True
# get the current object when every object should be visible in the same number of render steps
# (step_count/len(objects) is the number of steps for each object)
steps_per_obj = step_count/len(objects)
obj = objects[int(step/steps_per_obj)]
# visibility and rotation of object
obj.hide_render = False
for child in getChildren([obj]):
child.hide_render = False
# obj.hide = False
rotation_min = obj['rotation_range'][0]
rotation_range = obj['rotation_range'][1] - rotation_min
rotation_angle = rotation_min + (step % steps_per_obj) * (rotation_range / steps_per_obj)
obj.rotation_euler[2] = radians(rotation_angle)
# cam placement
cam_steps = min(steps_per_obj, MAX_CAM_STEPS)
cam_pos_min = obj['cam_pos_range'][0]
cam_pos_range = obj['cam_pos_range'][1] - cam_pos_min
camera.location[0] = cam_dist*cos(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps))
camera.location[1] = 0
camera.location[2] = cam_dist*sin(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps))
def random_cam_placement(camera, focus, target_obj):
# not used in current version
## places the camera randomly
x_range = camera["x_range"] #[.6,3.0]
y_range = camera["y_range"] #[-0.25,0.25]
z_range = camera["z_range"] #[.45,1.60]
# x_rot_range = camera["x_rot_range"] #[-0.15,0.15]
# y_rot_range = camera["y_rot_range"] #[-0.15,0.15]
# z_rot_range = camera["z_rot_range"] #[-0.15,0.15]
rand_pos = np.random.rand(3)
# rand_rot = np.random.rand(3)
camera.location[0] =x_range[1]-(x_range[1]-x_range[0])*rand_pos[0]
camera.location[1] =y_range[1]-(y_range[1]-y_range[0])*rand_pos[1]
camera.location[2] =z_range[1]-(z_range[1]-z_range[0])*rand_pos[2]
# place the camera target
target_obj.location[0] = focus[0]
target_obj.location[1] = focus[1]
def shape_key_adjustments(objects):
## iterates all shape keys of all objects and sets them to a random value
for obj in objects:
if obj.data.shape_keys:
keys = obj.data.shape_keys.key_blocks
if len(keys):
for i, k in enumerate(keys):
if i:
k.value = random.random()
def texture_adjustments():
## iterates all materials in the blender file and applies random adjustments based on naming conventions
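# Naming-convention examples (material names other than 'rand_plastic' are
# made up for illustration):
#   'rand_plastic' -> random base color via its RGB node (30% chance of grey)
#   'shift_wood'   -> random translation applied to its Mapping node
#   'mix_concrete' -> its 'noise_mix' factor randomized between 0.35 and 1.0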
textures = bpy.data.materials #['rand_plastic']
for t in textures:
# random color for rand
if "rand" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "RGB" in n.name:
if random.random() < .3: #30% chance for grey else random color
|
else:
n.outputs[0].default_value = [random.random(), random.random(), random.random(), 1]
if "rand" in n.name:
n.outputs[0].default_value = random.random()
if "switch" in n.name:
n.outputs[0].default_value = random.randint(0,1)
# random shift for shift
if "shift" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "Mapping" in n.name:
n.translation = [random.random()*10,random.random()*10,0]
# random mix for mix
if "mix" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "noise_mix" in n.name:
n.inputs[0].default_value = .35 + random.random()*.65
def save_label(output_path, bg_size, objects, bg_img_path=None, read_classes=True, segmentation=False):
if segmentation:
## Save the segmentation image
classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[0],bg_size[1],-1)
# One value per pixel
classImg = classImg[::-1,:,0]
# classImg = np.array( [ [ pixel[0] for pixel in row ] for row in classImg ] )
if not os.path.exists(output_path):
os.makedirs(output_path)
misc.toimage(classImg, cmin=0, cmax=255).save(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png')
# misc.imsave(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png', classImg)
else:
f_label = open(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.txt', 'w')
if read_classes and os.path.isfile(bg_img_path[:-4] + '.txt'):
bg_annotation = open(bg_img_path[:-4] + '.txt', 'r')
f_label.write(bg_annotation.read())
bg_annotation.close()
classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[1],bg_size[0],-1)
# One value per pixel
classImg = classImg[::-1,:,0]
# YOLO-style bounding boxes
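# Worked example with illustrative numbers: for bg_size=(640, 480) and a mask
# spanning xmin=100, xmax=300, ymin=50, ymax=250, the label fields are
#   x      = ((100 + 300) / 2) / 640 = 0.3125
#   width  = (300 - 100) / 640      = 0.3125
#   y      = ((50 + 250) / 2) / 480 = 0.3125
#   height = (250 - 50) / 480       ~ 0.4167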
for i in range(len(objects)):
# Finding non zero values
mask = (classImg == i+1)
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
if rows.any():
# min and max indices for bounding box
ymin, ymax = np.where(rows)[0][[0, -1]]
xmin, xmax = np.where(cols)[0][[0, -1]]
print(ymin, ymax, xmin, xmax)
x = ((xmin + xmax)/2)/ bg_size[0]
width = (xmax - xmin) / bg_size[0]
y = ((ymin + ymax)/2)/ bg_size[1]
height = (ymax - ymin) / bg_size[1]
if (width*height)>.005:
print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height))
print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height), file = f_label)
f_label.close()
def main():
# SEGMENTATION = bpy.context.scene.sg_label_mode == "sgSegment"
# RENDER_CROPPED = bpy.context.scene.sg_render_mode == "sgCropped"
SEGMENTATION = False
RENDER_CROPPED = True
cam = bpy.context.scene.sg_cam
objects = bpy.context.scene | c=random.random()
n.outputs[0].default_value = [c, c, c, 1] | conditional_block |
generate_samples.py | # Image texture onto background for more realistic reflections
if BACKGROUND_REFLECTIONS:
bg_img_path = img_list[step % len(img_list)-1] # unrandomized
bpy.data.images["ground.jpg"].filepath = bg_img_path
# hide all objects
for obj in objects:
obj.hide_render = True
# obj.hide = True
for child in getChildren(objects):
child.hide_render = True
# get the current object when every object should be visible in the same number of render steps
# (step_count/len(objects) is the number of steps for each object)
steps_per_obj = step_count/len(objects)
obj = objects[int(step/steps_per_obj)]
# visibility and rotation of object
obj.hide_render = False
for child in getChildren([obj]):
child.hide_render = False
# obj.hide = False
rotation_min = obj['rotation_range'][0]
rotation_range = obj['rotation_range'][1] - rotation_min
rotation_angle = rotation_min + (step % steps_per_obj) * (rotation_range / steps_per_obj)
obj.rotation_euler[2] = radians(rotation_angle)
# cam placement
cam_steps = min(steps_per_obj, MAX_CAM_STEPS)
cam_pos_min = obj['cam_pos_range'][0]
cam_pos_range = obj['cam_pos_range'][1] - cam_pos_min
camera.location[0] = cam_dist*cos(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps))
camera.location[1] = 0
camera.location[2] = cam_dist*sin(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps))
def random_cam_placement(camera, focus, target_obj):
# not used in current version
## places the camera randomly
x_range = camera["x_range"] #[.6,3.0]
y_range = camera["y_range"] #[-0.25,0.25]
z_range = camera["z_range"] #[.45,1.60]
# x_rot_range = camera["x_rot_range"] #[-0.15,0.15]
# y_rot_range = camera["y_rot_range"] #[-0.15,0.15]
# z_rot_range = camera["z_rot_range"] #[-0.15,0.15]
rand_pos = np.random.rand(3)
# rand_rot = np.random.rand(3)
camera.location[0] =x_range[1]-(x_range[1]-x_range[0])*rand_pos[0]
camera.location[1] =y_range[1]-(y_range[1]-y_range[0])*rand_pos[1]
camera.location[2] =z_range[1]-(z_range[1]-z_range[0])*rand_pos[2]
# place the camera target
target_obj.location[0] = focus[0]
target_obj.location[1] = focus[1]
def shape_key_adjustments(objects):
## iterates all shape keys of all objects and sets them to a random value
for obj in objects:
if obj.data.shape_keys:
keys = obj.data.shape_keys.key_blocks
if len(keys):
for i, k in enumerate(keys):
if i:
k.value = random.random()
def texture_adjustments():
## iterates all materials in the blender file and applies random adjustments based on naming conventions
textures = bpy.data.materials #['rand_plastic']
for t in textures:
# random color for rand
if "rand" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "RGB" in n.name:
if random.random() < .3: #30% chance for grey else random color
c=random.random()
n.outputs[0].default_value = [c, c, c, 1]
else:
n.outputs[0].default_value = [random.random(), random.random(), random.random(), 1]
if "rand" in n.name:
n.outputs[0].default_value = random.random()
if "switch" in n.name:
n.outputs[0].default_value = random.randint(0,1)
# random shift for shift
if "shift" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "Mapping" in n.name:
n.translation = [random.random()*10,random.random()*10,0]
# random mix for mix
if "mix" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "noise_mix" in n.name:
n.inputs[0].default_value = .35 + random.random()*.65
def save_label(output_path, bg_size, objects, bg_img_path=None, read_classes=True, segmentation=False):
if segmentation:
## Save the segmentation image
classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[0],bg_size[1],-1)
# One value per pixel
classImg = classImg[::-1,:,0]
# classImg = np.array( [ [ pixel[0] for pixel in row ] for row in classImg ] )
if not os.path.exists(output_path):
os.makedirs(output_path)
misc.toimage(classImg, cmin=0, cmax=255).save(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png')
# misc.imsave(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png', classImg)
else:
f_label = open(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.txt', 'w')
if read_classes and os.path.isfile(bg_img_path[:-4] + '.txt'):
bg_annotation = open(bg_img_path[:-4] + '.txt', 'r')
f_label.write(bg_annotation.read())
bg_annotation.close()
classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[1],bg_size[0],-1)
# One value per pixel
classImg = classImg[::-1,:,0]
# YOLO-style bounding boxes
for i in range(len(objects)):
# Finding non zero values
mask = (classImg == i+1)
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
if rows.any():
# min and max indices for bounding box
ymin, ymax = np.where(rows)[0][[0, -1]]
xmin, xmax = np.where(cols)[0][[0, -1]]
print(ymin, ymax, xmin, xmax)
x = ((xmin + xmax)/2)/ bg_size[0]
width = (xmax - xmin) / bg_size[0]
y = ((ymin + ymax)/2)/ bg_size[1]
height = (ymax - ymin) / bg_size[1]
if (width*height)>.005:
print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height))
print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height), file = f_label)
f_label.close()
def main():
# SEGMENTATION = bpy.context.scene.sg_label_mode == "sgSegment"
# RENDER_CROPPED = bpy.context.scene.sg_render_mode == "sgCropped"
SEGMENTATION = False
RENDER_CROPPED = True
cam = bpy.context.scene.sg_cam
objects = bpy.context.scene.sg_objectGroup.objects
# objects = [bpy.data.objects[obj] for obj in objectsList]
sun = bpy.context.scene.sg_sun
ground = bpy.context.scene.sg_ground
lamp_sun = bpy.data.lamps[sun.name]
cam_target = bpy.context.scene.sg_cam_target
tree_nodes = bpy.context.scene.node_tree.nodes
bg_size = bpy.context.scene.sg_img_size
cam_dist = bpy.context.scene.sg_cam_dist
compositing_node_group = bpy.data.scenes["Scene"].node_tree
step_count = bpy.context.scene.sg_nSamples
bg_path = bpy.context.scene.sg_backgroundPath.replace("//","")
output_path = tree_nodes['File Output'].base_path.replace('//','./')
img_list = sorted(glob.glob(bg_path+"*.png")+glob.glob(bg_path+"*.jpg"))
if RENDER_CROPPED:
output_path += 'rgba/'
elif SEGMENTATION:
output_path+="SegmentationClass/"
else:
output_path+="rgb/"
# Initial settings
ground.cycles.is_shadow_catcher = True
for i, o in enumerate(objects):
if not 'class' in o:
print(o.name, 'has no class yet. Set to 1.')
o["class"] = 1
if not 'rotation_range' in o: | print(o.name, 'has no rotation range yet. Set to [0,360].')
o["rotation_range"] = (0,360)
if not 'cam_pos_range' in o:
print(o.name, 'has no camera position range yet. Set to [0,90].') | random_line_split |
|
generate_samples.py | = True
# choose objects from the list; make visible, move and rotate
objects_order = list(range(len(objects)))
random.shuffle(objects_order)
n_objects = random.randint(1,len(objects)-1)
obj_center = np.asarray([0,0], 'float32')
for i in range(n_objects):
obj = objects[objects_order[i]]
## unhide
obj.hide_render= False
for child in getChildren([obj]):
child.hide_render = False
## random rotation
rotation_min = obj['rotation_range'][0]
rotation_range = obj['rotation_range'][1] - rotation_min
rotation_angle = rotation_min + rotation_range*random.random()
obj.rotation_euler[2] = radians(rotation_angle)
## position
pos = object_positions[i]
obj.location[0] = pos[0]
obj.location[1] = pos[1]
obj_center+=pos
# center_obj = objects[objects_order[0]]
obj_center/=n_objects
# Ground Texture to background for more realistic reflections
bpy.data.images["ground.jpg"].filepath = bg_img_path
return obj_center, bg_size
def cyclic_arangement(objects, camera, cam_dist, step, step_count, img_list):
## hides, reveals and rotates the objects and moves the camera so every object is visible for the same number of images from a diverse range of viewpoints
MAX_CAM_STEPS = 20 ## the max number of camera steps to go from the lowest to the highest position before starting again; the lowest and highest positions are defined by cam_pos_range
BACKGROUND_REFLECTIONS = True
# Image texture onto background for more realistic reflections
if BACKGROUND_REFLECTIONS:
bg_img_path = img_list[step % len(img_list)-1] # unrandomized
bpy.data.images["ground.jpg"].filepath = bg_img_path
# hide all objects
for obj in objects:
obj.hide_render = True
# obj.hide = True
for child in getChildren(objects):
child.hide_render = True
# get the current object when every object should be visible in the same number of render steps
# (step_count/len(objects) is the number of steps for each object)
steps_per_obj = step_count/len(objects)
obj = objects[int(step/steps_per_obj)]
# visibility and rotation of object
obj.hide_render = False
for child in getChildren([obj]):
child.hide_render = False
# obj.hide = False
rotation_min = obj['rotation_range'][0]
rotation_range = obj['rotation_range'][1] - rotation_min
rotation_angle = rotation_min + (step % steps_per_obj) * (rotation_range / steps_per_obj)
obj.rotation_euler[2] = radians(rotation_angle)
# cam placement
cam_steps = min(steps_per_obj, MAX_CAM_STEPS)
cam_pos_min = obj['cam_pos_range'][0]
cam_pos_range = obj['cam_pos_range'][1] - cam_pos_min
camera.location[0] = cam_dist*cos(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps))
camera.location[1] = 0
camera.location[2] = cam_dist*sin(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps))
def random_cam_placement(camera, focus, target_obj):
# not used in current version
## places the camera randomly
x_range = camera["x_range"] #[.6,3.0]
y_range = camera["y_range"] #[-0.25,0.25]
z_range = camera["z_range"] #[.45,1.60]
# x_rot_range = camera["x_rot_range"] #[-0.15,0.15]
# y_rot_range = camera["y_rot_range"] #[-0.15,0.15]
# z_rot_range = camera["z_rot_range"] #[-0.15,0.15]
rand_pos = np.random.rand(3)
# rand_rot = np.random.rand(3)
camera.location[0] =x_range[1]-(x_range[1]-x_range[0])*rand_pos[0]
camera.location[1] =y_range[1]-(y_range[1]-y_range[0])*rand_pos[1]
camera.location[2] =z_range[1]-(z_range[1]-z_range[0])*rand_pos[2]
# place the camera target
target_obj.location[0] = focus[0]
target_obj.location[1] = focus[1]
def shape_key_adjustments(objects):
## iterates all shape keys of all objects and sets them to a random value
for obj in objects:
if obj.data.shape_keys:
keys = obj.data.shape_keys.key_blocks
if len(keys):
for i, k in enumerate(keys):
if i:
k.value = random.random()
def | ():
## iterates all materials in the blender file and applies random adjustments based on naming conventions
textures = bpy.data.materials #['rand_plastic']
for t in textures:
# random color for rand
if "rand" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "RGB" in n.name:
if random.random() < .3: #30% chance for grey else random color
c=random.random()
n.outputs[0].default_value = [c, c, c, 1]
else:
n.outputs[0].default_value = [random.random(), random.random(), random.random(), 1]
if "rand" in n.name:
n.outputs[0].default_value = random.random()
if "switch" in n.name:
n.outputs[0].default_value = random.randint(0,1)
# random shift for shift
if "shift" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "Mapping" in n.name:
n.translation = [random.random()*10,random.random()*10,0]
# random mix for mix
if "mix" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "noise_mix" in n.name:
n.inputs[0].default_value = .35 + random.random()*.65
def save_label(output_path, bg_size, objects, bg_img_path=None, read_classes=True, segmentation=False):
if segmentation:
## Save the segmentation image
classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[0],bg_size[1],-1)
# One value per pixel
classImg = classImg[::-1,:,0]
# classImg = np.array( [ [ pixel[0] for pixel in row ] for row in classImg ] )
if not os.path.exists(output_path):
os.makedirs(output_path)
misc.toimage(classImg, cmin=0, cmax=255).save(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png')
# misc.imsave(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png', classImg)
else:
f_label = open(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.txt', 'w')
if read_classes and os.path.isfile(bg_img_path[:-4] + '.txt'):
bg_annotation = open(bg_img_path[:-4] + '.txt', 'r')
f_label.write(bg_annotation.read())
bg_annotation.close()
classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[1],bg_size[0],-1)
# One value per pixel
classImg = classImg[::-1,:,0]
# YOLO-style bounding boxes
for i in range(len(objects)):
# Finding non zero values
mask = (classImg == i+1)
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
if rows.any():
# min and max indices for bounding box
ymin, ymax = np.where(rows)[0][[0, -1]]
xmin, xmax = np.where(cols)[0][[0, -1]]
print(ymin, ymax, xmin, xmax)
x = ((xmin + xmax)/2)/ bg_size[0]
width = (xmax - xmin) / bg_size[0]
y = ((ymin + ymax)/2)/ bg_size[1]
height = (ymax - ymin) / bg_size[1]
if (width*height)>.005:
print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height))
print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height), file = f_label)
f_label.close()
def main():
# SEGMENTATION = bpy.context.scene.sg_label_mode == "sgSegment"
# RENDER_CROPPED = bpy.context.scene.sg_render_mode == "sgCropped"
SEGMENTATION = False
RENDER_CROPPED = True
cam = bpy.context.scene.sg_cam
objects = bpy.context.scene | texture_adjustments | identifier_name |
generate_samples.py | = True
# choose objects from the list; make visible, move and rotate
objects_order = list(range(len(objects)))
random.shuffle(objects_order)
n_objects = random.randint(1,len(objects)-1)
obj_center = np.asarray([0,0], 'float32')
for i in range(n_objects):
obj = objects[objects_order[i]]
## unhide
obj.hide_render= False
for child in getChildren([obj]):
child.hide_render = False
## random rotation
rotation_min = obj['rotation_range'][0]
rotation_range = obj['rotation_range'][1] - rotation_min
rotation_angle = rotation_min + rotation_range*random.random()
obj.rotation_euler[2] = radians(rotation_angle)
## position
pos = object_positions[i]
obj.location[0] = pos[0]
obj.location[1] = pos[1]
obj_center+=pos
# center_obj = objects[objects_order[0]]
obj_center/=n_objects
# Ground Texture to background for more realistic reflections
bpy.data.images["ground.jpg"].filepath = bg_img_path
return obj_center, bg_size
def cyclic_arangement(objects, camera, cam_dist, step, step_count, img_list):
## hides, reveals and rotates the objects and moves the camera so every object is visible for the same number of images from a diverse range of viewpoints
MAX_CAM_STEPS = 20 ## the max number of camera steps to go from the lowest to the highest position before starting again; the lowest and highest positions are defined by cam_pos_range
BACKGROUND_REFLECTIONS = True
# Image texture onto background for more realistic reflections
if BACKGROUND_REFLECTIONS:
bg_img_path = img_list[step % len(img_list)-1] # unrandomized
bpy.data.images["ground.jpg"].filepath = bg_img_path
# hide all objects
for obj in objects:
obj.hide_render = True
# obj.hide = True
for child in getChildren(objects):
child.hide_render = True
# get the current object when every object should be visible in the same number of render steps
# (step_count/len(objects) is the number of steps for each object)
steps_per_obj = step_count/len(objects)
obj = objects[int(step/steps_per_obj)]
# visibility and rotation of object
obj.hide_render = False
for child in getChildren([obj]):
child.hide_render = False
# obj.hide = False
rotation_min = obj['rotation_range'][0]
rotation_range = obj['rotation_range'][1] - rotation_min
rotation_angle = rotation_min + (step % steps_per_obj) * (rotation_range / steps_per_obj)
obj.rotation_euler[2] = radians(rotation_angle)
# cam placement
cam_steps = min(steps_per_obj, MAX_CAM_STEPS)
cam_pos_min = obj['cam_pos_range'][0]
cam_pos_range = obj['cam_pos_range'][1] - cam_pos_min
camera.location[0] = cam_dist*cos(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps))
camera.location[1] = 0
camera.location[2] = cam_dist*sin(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps))
def random_cam_placement(camera, focus, target_obj):
# not used in current version
## places the camera randomly
x_range = camera["x_range"] #[.6,3.0]
y_range = camera["y_range"] #[-0.25,0.25]
z_range = camera["z_range"] #[.45,1.60]
# x_rot_range = camera["x_rot_range"] #[-0.15,0.15]
# y_rot_range = camera["y_rot_range"] #[-0.15,0.15]
# z_rot_range = camera["z_rot_range"] #[-0.15,0.15]
rand_pos = np.random.rand(3)
# rand_rot = np.random.rand(3)
camera.location[0] =x_range[1]-(x_range[1]-x_range[0])*rand_pos[0]
camera.location[1] =y_range[1]-(y_range[1]-y_range[0])*rand_pos[1]
camera.location[2] =z_range[1]-(z_range[1]-z_range[0])*rand_pos[2]
# place the camera target
target_obj.location[0] = focus[0]
target_obj.location[1] = focus[1]
def shape_key_adjustments(objects):
## iterates all shape keys of all objects and sets them to a random value
|
def texture_adjustments():
## iterates all materials in the blender file and applies random adjustments based on naming conventions
textures = bpy.data.materials #['rand_plastic']
for t in textures:
# random color for rand
if "rand" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "RGB" in n.name:
if random.random() < .3: #30% chance for grey else random color
c=random.random()
n.outputs[0].default_value = [c, c, c, 1]
else:
n.outputs[0].default_value = [random.random(), random.random(), random.random(), 1]
if "rand" in n.name:
n.outputs[0].default_value = random.random()
if "switch" in n.name:
n.outputs[0].default_value = random.randint(0,1)
# random shift for shift
if "shift" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "Mapping" in n.name:
n.translation = [random.random()*10,random.random()*10,0]
# random mix for mix
if "mix" in t.name:
tex_nodes = t.node_tree.nodes
for n in tex_nodes:
if "noise_mix" in n.name:
n.inputs[0].default_value = .35 + random.random()*.65
def save_label(output_path, bg_size, objects, bg_img_path=None, read_classes=True, segmentation=False):
if segmentation:
## Save the segmentation image
classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[0],bg_size[1],-1)
# One value per pixel
classImg = classImg[::-1,:,0]
# classImg = np.array( [ [ pixel[0] for pixel in row ] for row in classImg ] )
if not os.path.exists(output_path):
os.makedirs(output_path)
misc.toimage(classImg, cmin=0, cmax=255).save(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png')
# misc.imsave(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png', classImg)
else:
f_label = open(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.txt', 'w')
if read_classes and os.path.isfile(bg_img_path[:-4] + '.txt'):
bg_annotation = open(bg_img_path[:-4] + '.txt', 'r')
f_label.write(bg_annotation.read())
bg_annotation.close()
classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[1],bg_size[0],-1)
# One value per pixel
classImg = classImg[::-1,:,0]
# YOLO-style bounding boxes
for i in range(len(objects)):
# Finding non zero values
mask = (classImg == i+1)
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
if rows.any():
# min and max indices for bounding box
ymin, ymax = np.where(rows)[0][[0, -1]]
xmin, xmax = np.where(cols)[0][[0, -1]]
print(ymin, ymax, xmin, xmax)
x = ((xmin + xmax)/2)/ bg_size[0]
width = (xmax - xmin) / bg_size[0]
y = ((ymin + ymax)/2)/ bg_size[1]
height = (ymax - ymin) / bg_size[1]
if (width*height)>.005:
print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height))
print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height), file = f_label)
f_label.close()
def main():
# SEGMENTATION = bpy.context.scene.sg_label_mode == "sgSegment"
# RENDER_CROPPED = bpy.context.scene.sg_render_mode == "sgCropped"
SEGMENTATION = False
RENDER_CROPPED = True
cam = bpy.context.scene.sg_cam
objects = bpy.context.scene | for obj in objects:
if obj.data.shape_keys:
keys = obj.data.shape_keys.key_blocks
if len(keys):
for i, k in enumerate(keys):
if i:
k.value = random.random() | identifier_body |
kogitoapp_types.go | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Runtime"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:label"
// +kubebuilder:validation:Enum=quarkus;springboot
Runtime RuntimeType `json:"runtime,omitempty"`
// S2I Build configuration.
// Default value: nil
Build *KogitoAppBuildObject `json:"build"`
// Kubernetes Service configuration.
// Default value: nil
Service KogitoAppServiceObject `json:"service,omitempty"`
// Annotates the pods managed by the operator with the required metadata for Istio to set up its sidecars, enabling the mesh. Defaults to false.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Istio"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnableIstio bool `json:"enableIstio,omitempty"`
// Set this property to true to tell the operator to deploy an instance of Infinispan via the Infinispan Operator and
// configure this service to connect to the deployed server.
// For Quarkus runtime, it sets QUARKUS_INFINISPAN_CLIENT_* environment variables. For Spring Boot, these variables start with SPRING_INFINISPAN_CLIENT_*.
// More info: https://github.com/kiegroup/kogito-cloud-operator#kogito-services.
// Set to false or ignore it if your service does not need persistence or if you are going to configure the persistence infrastructure yourself.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Persistence"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnablePersistence bool `json:"enablePersistence,omitempty"`
// Set this property to true to tell the operator to deploy an instance of Kafka via the Strimzi Operator and configure this service with
// the proper information to connect to the Kafka cluster.
// The Kafka cluster service endpoint will be injected in the Kogito Service container via an environment variable named "KAFKA_BOOTSTRAP_SERVERS" e.g.: kafka-kogito:9092.
// Set to false or ignore it if your service does not need messaging or if you are going to configure the messaging infrastructure yourself.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Events"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnableEvents bool `json:"enableEvents,omitempty"`
}
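// A minimal construction sketch for this spec (field values are illustrative
// assumptions, not defaults):
//
//	spec := KogitoAppSpec{
//		Runtime:           RuntimeType("quarkus"),
//		EnablePersistence: true,
//		EnableEvents:      true,
//		Build: &KogitoAppBuildObject{
//			GitSource: GitSource{URI: "https://github.com/kiegroup/kogito-examples"},
//		},
//	}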
// GetRuntime ...
func (k *KogitoAppSpec) GetRuntime() RuntimeType {
return k.Runtime
}
// GetBuild ...
func (k *KogitoAppSpec) GetBuild() *KogitoAppBuildObject {
if k == nil {
return nil
}
return k.Build
}
// IsGitURIEmpty checks if the provided Git URI is empty or not.
func (k *KogitoAppSpec) IsGitURIEmpty() bool {
if k == nil {
return true
}
if k.Build == nil {
return true
}
return len(k.Build.GitSource.URI) == 0
}
// KogitoAppBuildObject Data to define how to build an application from source.
// +k8s:openapi-gen=true
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Service Build"
type KogitoAppBuildObject struct {
Incremental bool `json:"incremental,omitempty"`
// Environment variables used during build time.
// +listType=atomic
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Build Env Variables"
Envs []corev1.EnvVar `json:"envs,omitempty"`
// Information about the git repository where the Kogito App source code resides.
// If set, the operator will use source to image strategy build.
// +optional
GitSource GitSource `json:"gitSource,omitempty"`
// WebHook secrets for build configs.
// +listType=atomic
// +optional
Webhooks []WebhookSecret `json:"webhooks,omitempty"`
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Images Version"
// Image version for the Kogito official images used during the build. E.g.: 0.6.0. Default to current Operator version.
ImageVersion string `json:"imageVersion,omitempty"`
// Custom image used by the source to image process to build the Kogito Service binaries. Takes precedence over ImageVersion attribute.
// + optional
// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
ImageS2ITag string `json:"imageS2ITag,omitempty"`
// Custom image used by the source to image process to build the final Kogito Service image. Takes precedence over ImageVersion attribute.
// + optional
// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
ImageRuntimeTag string `json:"imageRuntimeTag,omitempty"`
// Native indicates if the built Kogito Service should be compiled to run in native mode when Runtime is Quarkus. For more information, see https://www.graalvm.org/docs/reference-manual/aot-compilation/.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Native Build"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
Native bool `json:"native,omitempty"`
// Resources for S2I builder pods.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:resourceRequirements"
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// Internal Maven Mirror to be used during source-to-image builds to considerably increase build speed.
MavenMirrorURL string `json:"mavenMirrorURL,omitempty"`
// Artifact contains override information for building the Maven artifact.
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact"
Artifact Artifact `json:"artifact,omitempty"`
// If set to true will print the logs for downloading/uploading of maven dependencies. Defaults to false.
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnableMavenDownloadOutput bool `json:"enableMavenDownloadOutput,omitempty"`
}
// AddEnvironmentVariable adds a new environment variable to the build environment variables.
func (k *KogitoAppBuildObject) AddEnvironmentVariable(name, value string) {
env := corev1.EnvVar{
Name: name,
Value: value,
}
k.Envs = append(k.Envs, env)
}
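// Usage sketch for the helpers above and below (values are hypothetical):
//
//	build := &KogitoAppBuildObject{}
//	build.AddEnvironmentVariable("MAVEN_ARGS_APPEND", "-DskipTests")
//	build.AddResourceRequest("memory", "512Mi")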
// AddResourceRequest adds a new resource request. It also works on an uninitialized Requests field.
func (k *KogitoAppBuildObject) AddResourceRequest(name, value string) {
if k.Resources.Requests == nil {
k.Resources.Requests = corev1.ResourceList{}
|
// The name of the runtime used, either Quarkus or SpringBoot.
// Default value: quarkus.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true | random_line_split |
|
kogitoapp_types.go | are going to configure the persistence infrastructure yourself.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Persistence"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnablePersistence bool `json:"enablePersistence,omitempty"`
// Set this property to true to tell the operator to deploy an instance of Kafka via the Strimzi Operator and configure this service with
// the proper information to connect to the Kafka cluster.
// The Kafka cluster service endpoint will be injected in the Kogito Service container via an environment variable named "KAFKA_BOOTSTRAP_SERVERS" e.g.: kafka-kogito:9092.
// Set to false or ignore it if your service does not need messaging or if you are going to configure the messaging infrastructure yourself.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Events"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnableEvents bool `json:"enableEvents,omitempty"`
}
// GetRuntime ...
func (k *KogitoAppSpec) GetRuntime() RuntimeType {
return k.Runtime
}
// GetBuild ...
func (k *KogitoAppSpec) GetBuild() *KogitoAppBuildObject {
if k == nil {
return nil
}
return k.Build
}
// IsGitURIEmpty checks if the provided Git URI is empty or not.
func (k *KogitoAppSpec) IsGitURIEmpty() bool {
if k == nil {
return true
}
if k.Build == nil {
return true
}
return len(k.Build.GitSource.URI) == 0
}
// KogitoAppBuildObject Data to define how to build an application from source.
// +k8s:openapi-gen=true
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Service Build"
type KogitoAppBuildObject struct {
Incremental bool `json:"incremental,omitempty"`
// Environment variables used during build time.
// +listType=atomic
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Build Env Variables"
Envs []corev1.EnvVar `json:"envs,omitempty"`
// Information about the git repository where the Kogito App source code resides.
// If set, the operator will use source to image strategy build.
// +optional
GitSource GitSource `json:"gitSource,omitempty"`
// WebHook secrets for build configs.
// +listType=atomic
// +optional
Webhooks []WebhookSecret `json:"webhooks,omitempty"`
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Images Version"
// Image version for the Kogito official images used during the build. E.g.: 0.6.0. Defaults to the current Operator version.
ImageVersion string `json:"imageVersion,omitempty"`
// Custom image used by the source to image process to build the Kogito Service binaries. Takes precedence over ImageVersion attribute.
// + optional
// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
ImageS2ITag string `json:"imageS2ITag,omitempty"`
// Custom image used by the source to image process to build the final Kogito Service image. Takes precedence over ImageVersion attribute.
// + optional
// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
ImageRuntimeTag string `json:"imageRuntimeTag,omitempty"`
// Native indicates if the Kogito Service built should be compiled to run on native mode when Runtime is Quarkus. For more information, see https://www.graalvm.org/docs/reference-manual/aot-compilation/.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Native Build"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
Native bool `json:"native,omitempty"`
// Resources for S2I builder pods.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:resourceRequirements"
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// Internal Maven Mirror to be used during source-to-image builds to considerably increase build speed.
MavenMirrorURL string `json:"mavenMirrorURL,omitempty"`
// Artifact contains override information for building the Maven artifact.
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact"
Artifact Artifact `json:"artifact,omitempty"`
// If set to true, the logs for downloading/uploading of Maven dependencies will be printed. Defaults to false.
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnableMavenDownloadOutput bool `json:"enableMavenDownloadOutput,omitempty"`
}
// AddEnvironmentVariable adds new environment variable to build environment variables.
func (k *KogitoAppBuildObject) AddEnvironmentVariable(name, value string) {
env := corev1.EnvVar{
Name: name,
Value: value,
}
k.Envs = append(k.Envs, env)
}
// AddResourceRequest adds new resource request. Works also on an uninitialized Requests field.
func (k *KogitoAppBuildObject) AddResourceRequest(name, value string) {
if k.Resources.Requests == nil {
k.Resources.Requests = corev1.ResourceList{}
}
k.Resources.Requests[corev1.ResourceName(name)] = resource.MustParse(value)
}
// AddResourceLimit adds new resource limit. Works also on an uninitialized Limits field.
func (k *KogitoAppBuildObject) | (name, value string) {
if k.Resources.Limits == nil {
k.Resources.Limits = corev1.ResourceList{}
}
k.Resources.Limits[corev1.ResourceName(name)] = resource.MustParse(value)
}
// KogitoAppServiceObject Data to define the service of the Kogito application.
// +k8s:openapi-gen=true
type KogitoAppServiceObject struct {
// Labels for the application service.
Labels map[string]string `json:"labels,omitempty"`
}
// GitSource Git coordinates to locate the source code to build.
// +k8s:openapi-gen=true
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Git Source"
type GitSource struct {
// Git URI for the s2i source.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git URI"
URI string `json:"uri"`
// Branch to use in the Git repository.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Reference"
Reference string `json:"reference,omitempty"`
// Context/subdirectory where the code is located, relative to the repo root.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Context"
ContextDir string `json:"contextDir,omitempty"`
}
// WebhookType literal type to distinguish between different types of webhooks.
type WebhookType string
const (
// GitHubWebhook GitHub webhook.
| AddResourceLimit | identifier_name |
kogitoapp_types.go | are going to configure the persistence infrastructure yourself.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Persistence"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnablePersistence bool `json:"enablePersistence,omitempty"`
// Set this property to true to tell the operator to deploy an instance of Kafka via the Strimzi Operator and configure this service with
// the proper information to connect to the Kafka cluster.
// The Kafka cluster service endpoint will be injected in the Kogito Service container via an environment variable named "KAFKA_BOOTSTRAP_SERVERS" e.g.: kafka-kogito:9092.
// Set to false or ignore it if your service does not need messaging or if you are going to configure the messaging infrastructure yourself.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Events"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnableEvents bool `json:"enableEvents,omitempty"`
}
// GetRuntime ...
func (k *KogitoAppSpec) GetRuntime() RuntimeType {
return k.Runtime
}
// GetBuild ...
func (k *KogitoAppSpec) GetBuild() *KogitoAppBuildObject {
if k == nil {
return nil
}
return k.Build
}
// IsGitURIEmpty checks if the provided Git URI is empty or not.
func (k *KogitoAppSpec) IsGitURIEmpty() bool {
if k == nil {
return true
}
if k.Build == nil {
return true
}
return len(k.Build.GitSource.URI) == 0
}
// KogitoAppBuildObject Data to define how to build an application from source.
// +k8s:openapi-gen=true
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Service Build"
type KogitoAppBuildObject struct {
Incremental bool `json:"incremental,omitempty"`
// Environment variables used during build time.
// +listType=atomic
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Build Env Variables"
Envs []corev1.EnvVar `json:"envs,omitempty"`
// Information about the git repository where the Kogito App source code resides.
// If set, the operator will use source to image strategy build.
// +optional
GitSource GitSource `json:"gitSource,omitempty"`
// WebHook secrets for build configs.
// +listType=atomic
// +optional
Webhooks []WebhookSecret `json:"webhooks,omitempty"`
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Images Version"
// Image version for the Kogito official images used during the build. E.g.: 0.6.0. Defaults to the current Operator version.
ImageVersion string `json:"imageVersion,omitempty"`
// Custom image used by the source to image process to build the Kogito Service binaries. Takes precedence over ImageVersion attribute.
// + optional
// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
ImageS2ITag string `json:"imageS2ITag,omitempty"`
// Custom image used by the source to image process to build the final Kogito Service image. Takes precedence over ImageVersion attribute.
// + optional
// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
ImageRuntimeTag string `json:"imageRuntimeTag,omitempty"`
// Native indicates if the Kogito Service built should be compiled to run on native mode when Runtime is Quarkus. For more information, see https://www.graalvm.org/docs/reference-manual/aot-compilation/.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Native Build"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
Native bool `json:"native,omitempty"`
// Resources for S2I builder pods.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:resourceRequirements"
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// Internal Maven Mirror to be used during source-to-image builds to considerably increase build speed.
MavenMirrorURL string `json:"mavenMirrorURL,omitempty"`
// Artifact contains override information for building the Maven artifact.
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact"
Artifact Artifact `json:"artifact,omitempty"`
// If set to true, the logs for downloading/uploading of Maven dependencies will be printed. Defaults to false.
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnableMavenDownloadOutput bool `json:"enableMavenDownloadOutput,omitempty"`
}
// AddEnvironmentVariable adds new environment variable to build environment variables.
func (k *KogitoAppBuildObject) AddEnvironmentVariable(name, value string) {
env := corev1.EnvVar{
Name: name,
Value: value,
}
k.Envs = append(k.Envs, env)
}
// AddResourceRequest adds new resource request. Works also on an uninitialized Requests field.
func (k *KogitoAppBuildObject) AddResourceRequest(name, value string) {
if k.Resources.Requests == nil {
k.Resources.Requests = corev1.ResourceList{}
}
k.Resources.Requests[corev1.ResourceName(name)] = resource.MustParse(value)
}
// AddResourceLimit adds new resource limit. Works also on an uninitialized Limits field.
func (k *KogitoAppBuildObject) AddResourceLimit(name, value string) {
if k.Resources.Limits == nil |
k.Resources.Limits[corev1.ResourceName(name)] = resource.MustParse(value)
}
// KogitoAppServiceObject Data to define the service of the Kogito application.
// +k8s:openapi-gen=true
type KogitoAppServiceObject struct {
// Labels for the application service.
Labels map[string]string `json:"labels,omitempty"`
}
// GitSource Git coordinates to locate the source code to build.
// +k8s:openapi-gen=true
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Git Source"
type GitSource struct {
// Git URI for the s2i source.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git URI"
URI string `json:"uri"`
// Branch to use in the Git repository.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Reference"
Reference string `json:"reference,omitempty"`
// Context/subdirectory where the code is located, relative to the repo root.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Context"
ContextDir string `json:"contextDir,omitempty"`
}
// WebhookType literal type to distinguish between different types of webhooks.
type WebhookType string
const (
// GitHubWebhook GitHub webhook | {
k.Resources.Limits = corev1.ResourceList{}
} | conditional_block |
kogitoapp_types.go | are going to configure the persistence infrastructure yourself.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Persistence"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnablePersistence bool `json:"enablePersistence,omitempty"`
// Set this property to true to tell the operator to deploy an instance of Kafka via the Strimzi Operator and configure this service with
// the proper information to connect to the Kafka cluster.
// The Kafka cluster service endpoint will be injected in the Kogito Service container via an environment variable named "KAFKA_BOOTSTRAP_SERVERS" e.g.: kafka-kogito:9092.
// Set to false or ignore it if your service does not need messaging or if you are going to configure the messaging infrastructure yourself.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Events"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnableEvents bool `json:"enableEvents,omitempty"`
}
// GetRuntime ...
func (k *KogitoAppSpec) GetRuntime() RuntimeType {
return k.Runtime
}
// GetBuild ...
func (k *KogitoAppSpec) GetBuild() *KogitoAppBuildObject {
if k == nil {
return nil
}
return k.Build
}
// IsGitURIEmpty checks if the provided Git URI is empty or not.
func (k *KogitoAppSpec) IsGitURIEmpty() bool {
if k == nil {
return true
}
if k.Build == nil {
return true
}
return len(k.Build.GitSource.URI) == 0
}
// KogitoAppBuildObject Data to define how to build an application from source.
// +k8s:openapi-gen=true
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Service Build"
type KogitoAppBuildObject struct {
Incremental bool `json:"incremental,omitempty"`
// Environment variables used during build time.
// +listType=atomic
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Build Env Variables"
Envs []corev1.EnvVar `json:"envs,omitempty"`
// Information about the git repository where the Kogito App source code resides.
// If set, the operator will use source to image strategy build.
// +optional
GitSource GitSource `json:"gitSource,omitempty"`
// WebHook secrets for build configs.
// +listType=atomic
// +optional
Webhooks []WebhookSecret `json:"webhooks,omitempty"`
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Images Version"
// Image version for the Kogito official images used during the build. E.g.: 0.6.0. Defaults to the current Operator version.
ImageVersion string `json:"imageVersion,omitempty"`
// Custom image used by the source to image process to build the Kogito Service binaries. Takes precedence over ImageVersion attribute.
// + optional
// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
ImageS2ITag string `json:"imageS2ITag,omitempty"`
// Custom image used by the source to image process to build the final Kogito Service image. Takes precedence over ImageVersion attribute.
// + optional
// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
ImageRuntimeTag string `json:"imageRuntimeTag,omitempty"`
// Native indicates if the Kogito Service built should be compiled to run on native mode when Runtime is Quarkus. For more information, see https://www.graalvm.org/docs/reference-manual/aot-compilation/.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Native Build"
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
Native bool `json:"native,omitempty"`
// Resources for S2I builder pods.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:resourceRequirements"
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// Internal Maven Mirror to be used during source-to-image builds to considerably increase build speed.
MavenMirrorURL string `json:"mavenMirrorURL,omitempty"`
// Artifact contains override information for building the Maven artifact.
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact"
Artifact Artifact `json:"artifact,omitempty"`
// If set to true, the logs for downloading/uploading of Maven dependencies will be printed. Defaults to false.
// + optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
EnableMavenDownloadOutput bool `json:"enableMavenDownloadOutput,omitempty"`
}
// AddEnvironmentVariable adds new environment variable to build environment variables.
func (k *KogitoAppBuildObject) AddEnvironmentVariable(name, value string) {
env := corev1.EnvVar{
Name: name,
Value: value,
}
k.Envs = append(k.Envs, env)
}
// AddResourceRequest adds new resource request. Works also on an uninitialized Requests field.
func (k *KogitoAppBuildObject) AddResourceRequest(name, value string) |
// AddResourceLimit adds new resource limit. Works also on an uninitialized Limits field.
func (k *KogitoAppBuildObject) AddResourceLimit(name, value string) {
if k.Resources.Limits == nil {
k.Resources.Limits = corev1.ResourceList{}
}
k.Resources.Limits[corev1.ResourceName(name)] = resource.MustParse(value)
}
// KogitoAppServiceObject Data to define the service of the Kogito application.
// +k8s:openapi-gen=true
type KogitoAppServiceObject struct {
// Labels for the application service.
Labels map[string]string `json:"labels,omitempty"`
}
// GitSource Git coordinates to locate the source code to build.
// +k8s:openapi-gen=true
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Git Source"
type GitSource struct {
// Git URI for the s2i source.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git URI"
URI string `json:"uri"`
// Branch to use in the Git repository.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Reference"
Reference string `json:"reference,omitempty"`
// Context/subdirectory where the code is located, relative to the repo root.
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Context"
ContextDir string `json:"contextDir,omitempty"`
}
// WebhookType literal type to distinguish between different types of webhooks.
type WebhookType string
const (
// GitHubWebhook GitHub webhook | {
if k.Resources.Requests == nil {
k.Resources.Requests = corev1.ResourceList{}
}
k.Resources.Requests[corev1.ResourceName(name)] = resource.MustParse(value)
} | identifier_body |
block_stream.rs | stream: Box<dyn BlockStream<C>>,
size_hint: usize,
) -> Box<dyn BlockStream<C>> {
let (sender, receiver) = mpsc::channel::<Result<BlockStreamEvent<C>, Error>>(size_hint);
crate::spawn(async move { BufferedBlockStream::stream_blocks(stream, sender).await });
Box::new(BufferedBlockStream::new(receiver))
}
pub fn new(mut receiver: Receiver<Result<BlockStreamEvent<C>, Error>>) -> Self {
let inner = stream! {
loop {
let event = match receiver.recv().await {
Some(evt) => evt,
None => return,
};
yield event
}
};
Self {
inner: Box::pin(inner),
}
}
pub async fn stream_blocks(
mut stream: Box<dyn BlockStream<C>>,
sender: Sender<Result<BlockStreamEvent<C>, Error>>,
) -> Result<(), Error> {
while let Some(event) = stream.next().await {
match sender.send(event).await {
Ok(_) => continue,
Err(err) => {
return Err(anyhow!(
"buffered blockstream channel is closed, stopping. Err: {}",
err
))
}
}
}
Ok(())
}
}
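// BufferedBlockStream decouples the wrapped stream (producer) from its consumer by forwarding events
// through a bounded mpsc channel of capacity `size_hint`; `stream_blocks` runs as a spawned task and
// stops once the receiving half is dropped (the `send` error branch above).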
impl<C: Blockchain> BlockStream<C> for BufferedBlockStream<C> {}
impl<C: Blockchain> Stream for BufferedBlockStream<C> {
type Item = Result<BlockStreamEvent<C>, Error>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
self.inner.poll_next_unpin(cx)
}
}
pub trait BlockStream<C: Blockchain>:
Stream<Item = Result<BlockStreamEvent<C>, Error>> + Unpin + Send
{
}
/// BlockRefetcher abstraction allows a chain to decide if a block must be refetched after a dynamic data source was added
#[async_trait]
pub trait BlockRefetcher<C: Blockchain>: Send + Sync {
fn required(&self, chain: &C) -> bool;
async fn get_block(
&self,
chain: &C,
logger: &Logger,
cursor: FirehoseCursor,
) -> Result<C::Block, Error>;
}
/// BlockStreamBuilder is an abstraction that would separate the logic for building streams from the blockchain trait
#[async_trait]
pub trait BlockStreamBuilder<C: Blockchain>: Send + Sync {
async fn build_firehose(
&self,
chain: &C,
deployment: DeploymentLocator,
block_cursor: FirehoseCursor,
start_blocks: Vec<BlockNumber>,
subgraph_current_block: Option<BlockPtr>,
filter: Arc<C::TriggerFilter>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<C>>>;
async fn build_polling(
&self,
chain: &C,
deployment: DeploymentLocator,
start_blocks: Vec<BlockNumber>,
subgraph_current_block: Option<BlockPtr>,
filter: Arc<C::TriggerFilter>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<C>>>;
}
#[derive(Debug, Clone)]
pub struct FirehoseCursor(Option<String>);
impl FirehoseCursor {
#[allow(non_upper_case_globals)]
pub const None: Self = FirehoseCursor(None);
pub fn is_none(&self) -> bool {
self.0.is_none()
}
}
impl fmt::Display for FirehoseCursor {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.write_str(self.0.as_deref().unwrap_or(""))
}
}
impl From<String> for FirehoseCursor {
fn from(cursor: String) -> Self {
// Treat a cursor of "" as None. This is not strictly necessary for correctness, since the Firehose
// treats both the same, but it makes the intent a little clearer.
if cursor.is_empty() {
FirehoseCursor::None
} else {
FirehoseCursor(Some(cursor))
}
}
}
impl From<Option<String>> for FirehoseCursor {
fn from(cursor: Option<String>) -> Self {
match cursor {
None => FirehoseCursor::None,
Some(s) => FirehoseCursor::from(s),
}
}
}
impl AsRef<Option<String>> for FirehoseCursor {
fn as_ref(&self) -> &Option<String> {
&self.0
}
}
#[derive(Debug)]
pub struct BlockWithTriggers<C: Blockchain> {
pub block: C::Block,
pub trigger_data: Vec<C::TriggerData>,
}
impl<C: Blockchain> Clone for BlockWithTriggers<C>
where
C::TriggerData: Clone,
{
fn clone(&self) -> Self {
Self {
block: self.block.clone(),
trigger_data: self.trigger_data.clone(),
}
}
}
impl<C: Blockchain> BlockWithTriggers<C> {
/// Creates a BlockWithTriggers structure, which holds
/// the trigger data ordered and without any duplicates.
pub fn new(block: C::Block, mut trigger_data: Vec<C::TriggerData>, logger: &Logger) -> Self {
// This is where triggers get sorted.
trigger_data.sort();
let old_len = trigger_data.len();
// This is removing the duplicate triggers in the case of multiple
// data sources fetching the same event/call/etc.
trigger_data.dedup();
let new_len = trigger_data.len();
if new_len != old_len {
debug!(
logger,
"Trigger data had duplicate triggers";
"block_number" => block.number(),
"block_hash" => block.hash().hash_hex(),
"old_length" => old_len,
"new_length" => new_len,
);
}
Self {
block,
trigger_data,
}
}
pub fn trigger_count(&self) -> usize {
self.trigger_data.len()
}
pub fn ptr(&self) -> BlockPtr {
self.block.ptr()
}
pub fn parent_ptr(&self) -> Option<BlockPtr> {
self.block.parent_ptr()
}
}
#[async_trait]
pub trait TriggersAdapter<C: Blockchain>: Send + Sync {
// Return the block that is `offset` blocks before the block pointed to
// by `ptr` from the local cache. An offset of 0 means the block itself,
// an offset of 1 means the block's parent etc. If the block is not in
// the local cache, return `None`
async fn ancestor_block(
&self,
ptr: BlockPtr,
offset: BlockNumber,
) -> Result<Option<C::Block>, Error>;
// Returns a sequence of blocks in increasing order of block number.
// Each block will include all of its triggers that match the given `filter`.
// The sequence may omit blocks that contain no triggers,
// but all returned blocks must be part of the same chain starting at `chain_base`.
// At least one block will be returned, even if it contains no triggers.
// `step_size` is the suggested number of blocks to be scanned.
async fn scan_triggers(
&self,
from: BlockNumber,
to: BlockNumber,
filter: &C::TriggerFilter,
) -> Result<Vec<BlockWithTriggers<C>>, Error>;
// Used for reprocessing blocks when creating a data source.
async fn triggers_in_block(
&self,
logger: &Logger,
block: C::Block,
filter: &C::TriggerFilter,
) -> Result<BlockWithTriggers<C>, Error>;
/// Return `true` if the block with the given hash and number is on the
/// main chain, i.e., the chain going back from the current chain head.
async fn is_on_main_chain(&self, ptr: BlockPtr) -> Result<bool, Error>;
/// Get pointer to parent of `block`. This is called when reverting `block`.
async fn parent_ptr(&self, block: &BlockPtr) -> Result<Option<BlockPtr>, Error>;
}
#[async_trait]
pub trait FirehoseMapper<C: Blockchain>: Send + Sync {
async fn to_block_stream_event(
&self,
logger: &Logger,
response: &firehose::Response,
adapter: &Arc<dyn TriggersAdapter<C>>,
filter: &C::TriggerFilter,
) -> Result<BlockStreamEvent<C>, FirehoseError>;
/// Returns the [BlockPtr] value for the given block number. This is the block pointer
/// of the longest chain according to the Firehose view of the blockchain state.
///
/// This is a thin wrapper around [FirehoseEndpoint#block_ptr_for_number] to make
/// it chain agnostic and callable from chain agnostic [FirehoseBlockStream].
async fn block_ptr_for_number(
&self,
logger: &Logger,
endpoint: &Arc<FirehoseEndpoint>,
number: BlockNumber,
) -> Result<BlockPtr, Error>;
/// Returns the closest final block ptr to the block ptr received.
/// On probabilistic chains like Ethereum, finality is determined by
/// the confirmation threshold configured for the Firehose stack (currently
/// hard-coded to 200).
///
| inner: Pin<Box<dyn Stream<Item = Result<BlockStreamEvent<C>, Error>> + Send>>,
}
impl<C: Blockchain + 'static> BufferedBlockStream<C> {
pub fn spawn_from_stream( | random_line_split |
|
block_stream.rs | pub trait BlockStream<C: Blockchain>:
Stream<Item = Result<BlockStreamEvent<C>, Error>> + Unpin + Send
{
}
/// BlockRefetcher abstraction allows a chain to decide if a block must be refetched after a dynamic data source was added
#[async_trait]
pub trait BlockRefetcher<C: Blockchain>: Send + Sync {
fn required(&self, chain: &C) -> bool;
async fn get_block(
&self,
chain: &C,
logger: &Logger,
cursor: FirehoseCursor,
) -> Result<C::Block, Error>;
}
/// BlockStreamBuilder is an abstraction that would separate the logic for building streams from the blockchain trait
#[async_trait]
pub trait BlockStreamBuilder<C: Blockchain>: Send + Sync {
async fn build_firehose(
&self,
chain: &C,
deployment: DeploymentLocator,
block_cursor: FirehoseCursor,
start_blocks: Vec<BlockNumber>,
subgraph_current_block: Option<BlockPtr>,
filter: Arc<C::TriggerFilter>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<C>>>;
async fn build_polling(
&self,
chain: &C,
deployment: DeploymentLocator,
start_blocks: Vec<BlockNumber>,
subgraph_current_block: Option<BlockPtr>,
filter: Arc<C::TriggerFilter>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<C>>>;
}
#[derive(Debug, Clone)]
pub struct FirehoseCursor(Option<String>);
impl FirehoseCursor {
#[allow(non_upper_case_globals)]
pub const None: Self = FirehoseCursor(None);
pub fn is_none(&self) -> bool {
self.0.is_none()
}
}
impl fmt::Display for FirehoseCursor {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.write_str(self.0.as_deref().unwrap_or(""))
}
}
impl From<String> for FirehoseCursor {
fn from(cursor: String) -> Self {
// Treat a cursor of "" as None. This is not strictly necessary for correctness, since the Firehose
// treats both the same, but it makes the intent a little clearer.
if cursor.is_empty() {
FirehoseCursor::None
} else {
FirehoseCursor(Some(cursor))
}
}
}
impl From<Option<String>> for FirehoseCursor {
fn from(cursor: Option<String>) -> Self {
match cursor {
None => FirehoseCursor::None,
Some(s) => FirehoseCursor::from(s),
}
}
}
impl AsRef<Option<String>> for FirehoseCursor {
fn as_ref(&self) -> &Option<String> {
&self.0
}
}
#[derive(Debug)]
pub struct BlockWithTriggers<C: Blockchain> {
pub block: C::Block,
pub trigger_data: Vec<C::TriggerData>,
}
impl<C: Blockchain> Clone for BlockWithTriggers<C>
where
C::TriggerData: Clone,
{
fn clone(&self) -> Self {
Self {
block: self.block.clone(),
trigger_data: self.trigger_data.clone(),
}
}
}
impl<C: Blockchain> BlockWithTriggers<C> {
/// Creates a BlockWithTriggers structure, which holds
/// the trigger data ordered and without any duplicates.
pub fn | (block: C::Block, mut trigger_data: Vec<C::TriggerData>, logger: &Logger) -> Self {
// This is where triggers get sorted.
trigger_data.sort();
let old_len = trigger_data.len();
// This is removing the duplicate triggers in the case of multiple
// data sources fetching the same event/call/etc.
trigger_data.dedup();
let new_len = trigger_data.len();
if new_len != old_len {
debug!(
logger,
"Trigger data had duplicate triggers";
"block_number" => block.number(),
"block_hash" => block.hash().hash_hex(),
"old_length" => old_len,
"new_length" => new_len,
);
}
Self {
block,
trigger_data,
}
}
pub fn trigger_count(&self) -> usize {
self.trigger_data.len()
}
pub fn ptr(&self) -> BlockPtr {
self.block.ptr()
}
pub fn parent_ptr(&self) -> Option<BlockPtr> {
self.block.parent_ptr()
}
}
#[async_trait]
pub trait TriggersAdapter<C: Blockchain>: Send + Sync {
// Return the block that is `offset` blocks before the block pointed to
// by `ptr` from the local cache. An offset of 0 means the block itself,
// an offset of 1 means the block's parent etc. If the block is not in
// the local cache, return `None`
async fn ancestor_block(
&self,
ptr: BlockPtr,
offset: BlockNumber,
) -> Result<Option<C::Block>, Error>;
// Returns a sequence of blocks in increasing order of block number.
// Each block will include all of its triggers that match the given `filter`.
// The sequence may omit blocks that contain no triggers,
// but all returned blocks must be part of the same chain starting at `chain_base`.
// At least one block will be returned, even if it contains no triggers.
// `step_size` is the suggested number of blocks to be scanned.
async fn scan_triggers(
&self,
from: BlockNumber,
to: BlockNumber,
filter: &C::TriggerFilter,
) -> Result<Vec<BlockWithTriggers<C>>, Error>;
// Used for reprocessing blocks when creating a data source.
async fn triggers_in_block(
&self,
logger: &Logger,
block: C::Block,
filter: &C::TriggerFilter,
) -> Result<BlockWithTriggers<C>, Error>;
/// Return `true` if the block with the given hash and number is on the
/// main chain, i.e., the chain going back from the current chain head.
async fn is_on_main_chain(&self, ptr: BlockPtr) -> Result<bool, Error>;
/// Get pointer to parent of `block`. This is called when reverting `block`.
async fn parent_ptr(&self, block: &BlockPtr) -> Result<Option<BlockPtr>, Error>;
}
#[async_trait]
pub trait FirehoseMapper<C: Blockchain>: Send + Sync {
async fn to_block_stream_event(
&self,
logger: &Logger,
response: &firehose::Response,
adapter: &Arc<dyn TriggersAdapter<C>>,
filter: &C::TriggerFilter,
) -> Result<BlockStreamEvent<C>, FirehoseError>;
/// Returns the [BlockPtr] value for the given block number. This is the block pointer
/// of the longest chain according to the Firehose view of the blockchain state.
///
/// This is a thin wrapper around [FirehoseEndpoint#block_ptr_for_number] to make
/// it chain agnostic and callable from chain agnostic [FirehoseBlockStream].
async fn block_ptr_for_number(
&self,
logger: &Logger,
endpoint: &Arc<FirehoseEndpoint>,
number: BlockNumber,
) -> Result<BlockPtr, Error>;
/// Returns the closest final block ptr to the block ptr received.
/// On probabilistic chains like Ethereum, finality is determined by
/// the confirmation threshold configured for the Firehose stack (currently
/// hard-coded to 200).
///
/// On some other chains, like NEAR, the actual final block number is determined
/// from the block itself since it contains information about which block number
/// is final against the current block.
///
/// To take an example, assuming we are on Ethereum, the final block pointer
/// for block #10212 would be the determined final block #10012 (10212 - 200 = 10012).
async fn final_block_ptr_for(
&self,
logger: &Logger,
endpoint: &Arc<FirehoseEndpoint>,
block: &C::Block,
) -> Result<BlockPtr, Error>;
}
#[async_trait]
pub trait SubstreamsMapper<C: Blockchain>: Send + Sync {
async fn to_block_stream_event(
&self,
logger: &Logger,
response: Option<Message>,
// adapter: &Arc<dyn TriggersAdapter<C>>,
// filter: &C::TriggerFilter,
) -> Result<Option<BlockStreamEvent<C>>, SubstreamsError>;
}
#[derive(Error, Debug)]
pub enum FirehoseError {
/// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block)
#[error("received gRPC block payload cannot be decoded: {0}")]
DecodingError(#[from] prost::DecodeError),
/// Some unknown error occurred
#[error("unknown error")]
UnknownError(#[from] anyhow::Error),
}
#[derive(Error, Debug)]
pub enum SubstreamsError {
#[error("response is missing the clock information")]
MissingClockError,
#[error("invalid undo message")]
InvalidUndoError,
/// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block)
#[error("received g | new | identifier_name |
MPO.py | = pd.DataFrame(self.CML_weights, columns=self.stock_names+["Risk-free rate"])
T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.iloc[i].to_string() ))+"%" for i in PD.index]
if n == "CMLp":
PD = pd.DataFrame(self.CMLpw, index=self.stock_names+["Risk-free rate"])
T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.to_string() ))+"%"]
return T
def annotations(strings, placements): # TODO: better annotations
annotations=list()
for s, p in zip(strings, placements):
d = dict(
x=p[0], y=p[1],
xref='paper', yref='paper',
text=s,
showarrow=True,
arrowhead=20
)
annotations.append(d)
return annotations
start = "{0}-{1}-{2}".format(self.start.day, self.start.month, self.start.year)
end = "{0}-{1}-{2}".format(self.end.day, self.end.month, self.end.year)
self.name = name = "{0} - {1}".format(start, end)
data = list()
EFF = go.Scatter(
x = self.frontier_risk[self.idxmin:],
y = self.frontier_exp_return[self.idxmin:],
mode = 'markers+lines',
legendgroup = name if self.stack_windows else None,
showlegend = True,
marker = dict(size=5, symbol="circle"), #, color=[1 for _ in self.frontier_risk[self.idxmin:]], colorscale="Electric"),
text = weights_in_text("EFF")[self.idxmin:],
name = "Efficient frontier:<br>{}".format(name) if not self.stack_windows else name
)
EFP = go.Scatter(
x = [self.marketPx],
y = [self.marketPy],
mode = 'markers',
legendgroup = name if self.stack_windows else None,
showlegend = False if self.stack_windows else True,
marker = dict(size=15, symbol="circle"), #color=[1], colorscale="Electric"),
name = "Market/Efficient portfolio"
)
MVP = go.Scatter(
x = [self.minriskPx],
y = [self.minriskPy],
mode = "markers",
legendgroup = name if self.stack_windows else None,
showlegend = False if self.stack_windows else True,
marker = dict(size=15, symbol="diamond-x"), #, color=[1], colorscale="Electric"),
name = "minimum variance portfolio"
)
if self.plot_CML:
CML = go.Scatter(
x = self.CMLx,
y = self.CMLy,
mode='lines+markers',
legendgroup = name if self.stack_windows else None,
text = weights_in_text("CML"),
name = "Capital market line"
#marker = make the color outside the space of the efficient frontier a different color
)
data.append(CML)
if self.required_return:
CMLp = go.Scatter(
x = [self.CMLPx],
y = [self.CMLPy],
mode = "markers",
legendgroup = name if self.stack_windows else None,
text = weights_in_text("CMLp"),
name = "optimal allocation<br>with required return of {0}%".format(self.required_return*100),
marker = dict(size=15, symbol="diamond-x")
)
data.append(CMLp)
if self.plot_simulation:
MonteCarlo = go.Scatter(
x = self.MCx,
y = self.MCy,
mode = "markers",
marker = dict(size=6, colorscale="Electric", color=self.MCsr, showscale=True,
colorbar=dict(title="Sharpe Ratio", titleside="right")),
name = "MonteCarlo Simulated portfolios"
)
data.append(MonteCarlo)
data += [EFF, EFP, MVP]
title = "Efficent Frontier"
if not self.plot_as_windows:
title = format("{0}<br>from {1} to {2}".format(title, start, end))
self.layout = go.Layout(
annotations = annotations([weights_in_text("EFP")],[(0.2,0.8)]) if self.annotations else annotations("",(0,0)), #TODO: better (less hacky) annotations
legend=dict(
x=1.2,
y=1.2,
traceorder='grouped',
tracegroupgap=20,
font=dict(
family='sans-serif',
size=20,
color='#000'
),
bgcolor='#E2E2E2',
bordercolor='#FFFFFF',
borderwidth=2
),
title=title,
showlegend=True,
font=dict(
size=20,
color='#000'
),
hovermode='closest',
yaxis=dict(title="Portfolio Return"),
xaxis=dict(title="Portfolio Variance"),
height=1000,
width=1200,
)
self.plot_data += data
def execute_plot(self):
fig = go.Figure(data=self.plot_data, layout=self.layout)
if self.online:
plotly.tools.set_credentials_file(username="TheVizWiz", api_key="92x5KNp4VDPBDGNtLR2l")
py.plot(fig, filename='efficent_frontier')
if not self.online:
name = self.name_of_data + self.name
plot_url = offline.plot(fig, image='png',auto_open=self.auto_open, image_filename=name,
output_type='file', image_width=1200, image_height=1000,
filename="figures/{0}.html".format(name) # run some sys to create folder
)
self.plot_data = list() # Clear plot data when plot is made
def with_moving_windows(self, operation):
def func_wrapper():
time = self.end - self.start
# self.absolute_start = self.start
# self.absolute_end = self.end
window = datetime.timedelta(days=self.window_size)
window_m = datetime.timedelta(days=self.window_move)
while time-window >= datetime.timedelta(1):
self.end = self.start + window
operation()
self.start = self.start + window_m
time -= window_m
return func_wrapper
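# Moving-window mechanics: the wrapped operation runs once per window; each window spans
# `window_size` days and its start advances by `window_move` days until the end date is reached.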
def prepare_data(self):
self.get_monthly_data()
self.calculate_log_change()
def analyze_data(self):
self.calculate_covariance_and_var()
self.calculate_expected_market_return()
self.calculate_beta()
self.calculate_regress_params()
self.calculate_exp_return()
self.solve_elements_for_plot()
self.CAPM_prediction()
def run_backtest(self):
#cross-validation of the model (WARNING: NOT PRETTY! - gaffa-taped together at the last moment)
# TODO: can't run backtest after run_pack - why? Fix!
self.window_size=365
self.window_move=365
self.market_portfolios = list()
self.expected_portfolio_returns = list()
self.prepare_data()
def one_window():
self.assign_data_window("windows")
self.analyze_data()
self.expected_portfolio_returns.append(self.exp_return_yr)
self.assign_data_window("backtest_weights")
self.analyze_data()
self.market_portfolios.append(self.Wmp)
self.with_moving_windows(one_window)()
self.backtest_results = [(i*x).sum() for i,x in zip(self.expected_portfolio_returns, self.market_portfolios)]
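# Each backtest result is the dot product of a window's expected asset returns (computed on the
# held-out window) with the market-portfolio weights estimated without that window; note the weights
# are stored in percent, so the resulting figures are scaled by a factor of 100.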
def run_pack(self):
self.plot_data = list()
self.prepare_data()
def one_window():
self.assign_data_window("windows")
self.analyze_data()
self.prepare_plot()
if not self.stack_windows: self.execute_plot()
if self.plot_as_windows:
self.with_moving_windows(one_window)()
if self.stack_windows: self.execute_plot()
else:
one_window()
if __name__ == '__main__':
CP = Calcualtion_pack(
stock_ticks = ["WIKI/AAPL", "WIKI/ABC", "WIKI/AGN", "WIKI/ADP", "WIKI/ADSK", "WIKI/IBM", "WIKI/GE"],
stock_names = "APL ABC AGN ADP ADSK IBM GE".split(),
# stock_ticks=["NASDAQOMX/NQDK4000DKK", "NASDAQOMX/NQDE", "NASDAQOMX/NQJP2000JPY",
# "NASDAQOMX/NQHK2000HKD", "NASDAQOMX/NQGB", "NASDAQOMX/NQSE",
# "NASDAQOMX/NQFI"],
market_indecies=["GOOGL"],
start=datetime.datetime(1999, 1, 1),
end=datetime.datetime(2018,1,1),
risk_free_rate= 0.03,
source = "pickle",
name_of_data = "USA",
n_sim = 10000, | # online = True,
# window_size=3650,
# window_move=365, | random_line_split |
|
MPO.py | (1)).dropna()
def assign_data_window(self, opperation_type=None):
"""the unelegance is here that there needs to be first a calculation of the backtest weights with one range of data held out
followed then by a calculation of the backtest expected return calculated on the held out data
this assign data method is fairly adhock"""
if opperation_type == "backtest_weights":
df1 = self.log_change_data
df2 = self.log_change_data[self.start:self.end]
self.data_window = pd.concat([df1, df2]).drop_duplicates(keep=False) # removes the window from the dataframe ie. hold one out
elif opperation_type == "windows":
self.data_window = self.log_change_data[self.start:self.end]
else:
self.data_window = self.log_change_data
def calculate_covariance_and_var(self):
self.cov_matrix = self.data_window.cov() * 12
self.var = pd.Series(np.diag(self.cov_matrix), index=[self.cov_matrix.columns])
def CAPM_prediction(self):
rf = self.risk_free_rate
b = self.var.drop(self.market_indecies)
Rm = self.market_returns_yr
self.CAPM = sum((rf + b*(Rm-rf).values) * (self.Wmp))/100 #CAPM times weights of market portfolio
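# Note: textbook CAPM would use each asset's beta here (E[R_i] = rf + beta_i*(E[R_m] - rf)); `b` above is
# taken from `self.var` (the diagonal of the covariance matrix), so the betas computed in calculate_beta /
# calculate_regress_params are not the ones entering this weighted sum.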
if self.plot_as_windows:
self.CAPMs.append(self.CAPM)
def calculate_beta(self): #can be done with linalg cov_matrix * var
#getting beta as covar/var
d = defaultdict(list)
for index in self.market_indecies:
var = self.cov_matrix.loc[index, index]
for tick in self.stock_ticks:
covar = self.cov_matrix.loc[index, tick]
d[index] += [covar/var]
self.beta1 = pd.DataFrame(data=d, index=self.stock_ticks)
def calculate_regress_params(self):
#getting alpha and beta with linear regression
a = defaultdict(list)
b = defaultdict(list)
for market in self.market_indecies:
for tick in self.stock_ticks:
slope, intercept, _, _, _ = linregress(self.data_window[tick], self.data_window[market])
a[market] += [intercept]
b[market] += [slope]
self.alfa = pd.DataFrame(data=a, index=self.stock_ticks)
self.beta = pd.DataFrame(data=b, index=self.stock_ticks)
def calculate_expected_market_return(self):
#Using plain mean value
self.market_returns = self.data_window[self.market_indecies].mean()
#annualizing: exp(12 * mean monthly log return) - 1 (continuous compounding)
self.market_returns_yr = np.exp(self.market_returns*12)-1
def calculate_exp_return(self):
# #Using CAPM
# self.exp_return = self.risk_free_rate + (self.market_returns-self.risk_free_rate) * self.beta
# Using plain mean value
self.exp_return = self.data_window[self.stock_ticks].mean()
self.exp_return_yr = np.exp(self.exp_return*12)-1
def solve_elements_for_plot(self):
"""Operations"""
def quad_var(W, C):
return np.sqrt(dot(dot(W.T, C), W)) # Quadratic expression to calculate portfolio risk
def exp_return(W, R):
return np.dot(W.T, R).sum() # Expected portfolio return
def exp_return1(W, R, rf):
return rf + np.dot(W.T, (R-rf)).sum()
def sharpe_ratio(Fr, Vf, rf):
# returns Sr
return (Fr - rf) / Vf
def CML(Vf, rf, Sr):
# risk_free_rate + risk * Sharpe_ratio_of_the_market_portfolio
return Vf * Sr + rf
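# The helpers above implement the textbook quantities: portfolio risk sigma_p = sqrt(W^T C W),
# expected return mu_p = W^T R, Sharpe ratio S = (mu_p - rf) / sigma_p, and the capital market line
# mu = rf + S_m * sigma, where S_m is the Sharpe ratio of the tangency (market) portfolio.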
def qsolve(R, Fr, C):
| h = ({'type':'eq', 'fun': lambda W: sum(W)-1.}, # Sum of weights = 100%
{'type':'eq', 'fun': lambda W: exp_return(W, R) - r}) # equalizes portfolio return to r
for r in Fr:
# For a given level of return r, find the weights which minimize portfolio variance.
optimized = minimize(fitness, W, args=(C, r), method='SLSQP', #Sequential Least SQuares Programming
constraints=h, bounds=b)
X = optimized.x
Wf.append(X)
Vx = quad_var(X,C)
Vf.append(Vx)
return Vf, Wf
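# qsolve traces the efficient frontier: for every target return r in Fr it minimizes W^T C W (via SLSQP)
# subject to sum(W) = 1 and W^T R = r, with long-only bounds, returning the frontier risks and weights.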
R = self.exp_return_yr.values
Fr = np.linspace(min(R), max(R), num=100)
C = self.cov_matrix.iloc[:-1,:-1].values #cov matrix without market index
Vf, Wf = qsolve(R, Fr, C)
rf = self.risk_free_rate
self.EFFsr = sharpe_ratio(Fr, Vf, rf) #sharpe ratio for portfolios on the efficient frontier
# FRONTIER
self.frontier_exp_return = Fr #Y axis of EFF
self.frontier_risk = Vf #X axis of EFF
self.frontier_weights = [[round(w*100,2) for w in ws] for ws in Wf] #TODO might be done directly in pandas
#MARKET PORTFOLIO
idxmax = np.argmax(self.EFFsr) # index of "market" portfolio
MPsr = self.EFFsr[idxmax] # sharpe ratio of "market" portfolio ie. slope of CML
self.Wmp = self.frontier_weights[idxmax]# weights of market portfolio
self.marketPx = Vf[idxmax] # "market" portfolio x and y
self.marketPy = Fr[idxmax]
#MINIMUM RISK PORTFOLIO
idxmin = self.idxmin = np.argmin(Vf) # index of minimum risk portfolio
self.minriskPx = Vf[idxmin]
self.minriskPy = Fr[idxmin]
if self.plot_CML:
#CAPITAL MARKET LINE
self.CMLx = np.linspace(0, max(Vf), num=100)
self.CMLy = [CML(x, rf, MPsr) for x in self.CMLx]
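# qsolve1 below recovers, for each (risk, return) point along the CML, an allocation over the risky
# assets plus the risk-free instrument whose risk and return match that point (the equality constraints
# do the work; the objective itself does not depend on W).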
def qsolve1(CMLy, CMLx, C, R): #TODO: make 1 Qsolver with intuitive changeable constraints
W = R*0 + 1/len(R) #Initialize equal percentage weights
rf = self.risk_free_rate
#Bounds (inequality constraints)
b = [(0.,2.) for i in W] # weights bounded between 0% and 200% - no shorting
def fitness(W, x, y, rf, ri, re):
Pv = sharpe_ratio(x, y, rf)
return - Pv.sum()
Vf, Wf = [], []
# Equality constraints
h = ({'type':'eq', 'fun': lambda W: sum(W)-1.}, # Sum of weights = 100%
{'type':'eq', 'fun': lambda W: quad_var(W, C) - ri}, # equalizes portfolio risk to ri
{'type':'eq', 'fun': lambda W: exp_return1(W, R, rf) - re}) # equalizes portfolio return to re
for ri, re in zip(CMLx, CMLy):
# For each (risk, return) point on the CML, find a feasible weight allocation that matches it.
optimized = minimize(fitness, W, args=(CMLx, CMLy, rf, ri, re), method='SLSQP', #Sequential Least SQuares Programming
constraints=h, bounds=b)
X = optimized.x
Wf.append(X)
Vx = quad_var(X,C)
Vf.append(Vx)
return Vf, Wf
R1 = self.exp_return_yr
R1["Rf"] = self.risk_free_rate
R1 = R1.values
C1 = self.cov_matrix.iloc[:-1,:-1]
C1["Rf"] = 0.0
C1.loc["Rf"] = 0.0
C1 = C1.values
_, Wcml = qsolve1(self.CMLy, self.CMLx, C1, R1)
self.CML_weights = [[round(w*100,2) for w in ws] for ws in Wcml]
#portfolio | """
where:
R is the vector of expected returns
Fr is the range expected returns on the EFF
C is the var-covariance matrix
"""
# TODO: add options to short and borrow
W = R*0 + 1/len(R) #Initialize equal percentage weights
#Bounds (inequality constraints)
b = [(0.,1.) for i in W] # weights between 0% and 100% - no borrowing, no shorting
def fitness(W, C, r):
Pv = quad_var(W, C)
return Pv
Vf, Wf = [], []
# Equality constraints | identifier_body |
MPO.py | (W)-1.}, # Sum of weights = 100%
{'type':'eq', 'fun': lambda W: quad_var(W, C) - ri}, # equalizes portfolio risk to ri
{'type':'eq', 'fun': lambda W: exp_return1(W, R, rf) - re}) # equalizes portfolio return to re
for ri, re in zip(CMLx, CMLy):
# For each (risk, return) point on the CML, find a feasible weight allocation that matches it.
optimized = minimize(fitness, W, args=(CMLx, CMLy, rf, ri, re), method='SLSQP', #Sequential Least SQuares Programming
constraints=h, bounds=b)
X = optimized.x
Wf.append(X)
Vx = quad_var(X,C)
Vf.append(Vx)
return Vf, Wf
R1 = self.exp_return_yr
R1["Rf"] = self.risk_free_rate
R1 = R1.values
C1 = self.cov_matrix.iloc[:-1,:-1]
C1["Rf"] = 0.0
C1.loc["Rf"] = 0.0
C1 = C1.values
_, Wcml = qsolve1(self.CMLy, self.CMLx, C1, R1)
self.CML_weights = [[round(w*100,2) for w in ws] for ws in Wcml]
#portfolio on CML with rr as return
if self.required_return:
# DANGER! Mess ahead
rr = self.required_return
risk = (rr-rf)/MPsr
self.CMLPx = risk
self.CMLPy = rr
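# The required return is mapped onto the CML by inverting rr = rf + MPsr * risk, i.e. risk = (rr - rf) / MPsr.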
_, CMLpw = qsolve1(np.array([rr]), np.array([risk]), C1, R1)
self.CMLpw = [round(w*100,2) for w in CMLpw[0]] #Fix: why index?
if self.plot_as_windows: self.CMLpw_weights.append(self.CMLpw)
if self.plot_simulation:
def MCsimulation(R, C, rf):
returns, volatility, ratio = [], [], []
for single_portfolio in range(self.n_sim):
W = np.random.normal(scale=4,size=len(self.stock_ticks))**2
W /= np.sum(W)
ret = exp_return(W, R)
vol = quad_var(W, C)
returns.append(ret)
volatility.append(vol)
ratio.append(sharpe_ratio(ret, vol, rf))
self.MCx = volatility
self.MCy = returns
self.MCsr = ratio
MCsimulation(R, C, rf)
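# Monte Carlo sketch: n_sim random long-only weight vectors (squared normal draws, normalised to sum to 1)
# are scored on return, risk and Sharpe ratio purely for visual comparison against the analytic frontier.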
#TODO: plot 100% in one stock for every stock
def prepare_plot(self):
def weights_in_text(n):
if n == "EFF":
PD = pd.DataFrame(self.frontier_weights, columns=self.stock_names)
T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.iloc[i].to_string() ))+"%" for i in PD.index]
if n == "EFP":
T = "Efficient portfolio<br>"+"".join(["{0}: {1}%<br>".format(name, weight)
for name, weight in zip(self.stock_names, self.Wmp)])
if n == "CML":
PD = pd.DataFrame(self.CML_weights, columns=self.stock_names+["Risk-free rate"])
T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.iloc[i].to_string() ))+"%" for i in PD.index]
if n == "CMLp":
PD = pd.DataFrame(self.CMLpw, index=self.stock_names+["Risk-free rate"])
T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.to_string() ))+"%"]
return T
def annotations(strings, placements): # TODO: better annotations
annotations=list()
for s, p in zip(strings, placements):
d = dict(
x=p[0], y=p[1],
xref='paper', yref='paper',
text=s,
showarrow=True,
arrowhead=20
)
annotations.append(d)
return annotations
start = "{0}-{1}-{2}".format(self.start.day, self.start.month, self.start.year)
end = "{0}-{1}-{2}".format(self.end.day, self.end.month, self.end.year)
self.name = name = "{0} - {1}".format(start, end)
data = list()
EFF = go.Scatter(
x = self.frontier_risk[self.idxmin:],
y = self.frontier_exp_return[self.idxmin:],
mode = 'markers+lines',
legendgroup = name if self.stack_windows else None,
showlegend = True,
marker = dict(size=5, symbol="circle"), #, color=[1 for _ in self.frontier_risk[self.idxmin:]], colorscale="Electric"),
text = weights_in_text("EFF")[self.idxmin:],
name = "Efficient frontier:<br>{}".format(name) if not self.stack_windows else name
)
EFP = go.Scatter(
x = [self.marketPx],
y = [self.marketPy],
mode = 'markers',
legendgroup = name if self.stack_windows else None,
showlegend = False if self.stack_windows else True,
marker = dict(size=15, symbol="circle"), #color=[1], colorscale="Electric"),
name = "Market/Efficient portfolio"
)
MVP = go.Scatter(
x = [self.minriskPx],
y = [self.minriskPy],
mode = "markers",
legendgroup = name if self.stack_windows else None,
showlegend = False if self.stack_windows else True,
marker = dict(size=15, symbol="diamond-x"), #, color=[1], colorscale="Electric"),
name = "minimum variance portfolio"
)
if self.plot_CML:
CML = go.Scatter(
x = self.CMLx,
y = self.CMLy,
mode='lines+markers',
legendgroup = name if self.stack_windows else None,
text = weights_in_text("CML"),
name = "Capital market line"
#marker = make the color outside the space of the efficient frontier a different color
)
data.append(CML)
if self.required_return:
CMLp = go.Scatter(
x = [self.CMLPx],
y = [self.CMLPy],
mode = "markers",
legendgroup = name if self.stack_windows else None,
text = weights_in_text("CMLp"),
name = "optimal allocation<br>with required return of {0}%".format(self.required_return*100),
marker = dict(size=15, symbol="diamond-x")
)
data.append(CMLp)
if self.plot_simulation:
MonteCarlo = go.Scatter(
x = self.MCx,
y = self.MCy,
mode = "markers",
marker = dict(size=6, colorscale="Electric", color=self.MCsr, showscale=True,
colorbar=dict(title="Sharpe Ratio", titleside="right")),
name = "MonteCarlo Simulated portfolios"
)
data.append(MonteCarlo)
data += [EFF, EFP, MVP]
title = "Efficent Frontier"
if not self.plot_as_windows:
title = format("{0}<br>from {1} to {2}".format(title, start, end))
self.layout = go.Layout(
annotations = annotations([weights_in_text("EFP")],[(0.2,0.8)]) if self.annotations else annotations("",(0,0)), #TODO: better (less hacky) annotations
legend=dict(
x=1.2,
y=1.2,
traceorder='grouped',
tracegroupgap=20,
font=dict(
family='sans-serif',
size=20,
color='#000'
),
bgcolor='#E2E2E2',
bordercolor='#FFFFFF',
borderwidth=2
),
title=title,
showlegend=True,
font=dict(
size=20,
color='#000'
),
hovermode='closest',
yaxis=dict(title="Portfolio Return"),
xaxis=dict(title="Portfolio Variance"),
height=1000,
width=1200,
)
self.plot_data += data
def execute_plot(self):
fig = go.Figure(data=self.plot_data, layout=self.layout)
if self.online:
plotly.tools.set_credentials_file(username="TheVizWiz", api_key="92x5KNp4VDPBDGNtLR2l")
py.plot(fig, filename='efficent_frontier')
if not self.online:
| name = self.name_of_data + self.name
plot_url = offline.plot(fig, image='png',auto_open=self.auto_open, image_filename=name,
output_type='file', image_width=1200, image_height=1000,
filename="figures/{0}.html".format(name) # run some sys to create folder
) | conditional_block |
|
MPO.py | 1)).dropna()
def assign_data_window(self, opperation_type=None):
"""the unelegance is here that there needs to be first a calculation of the backtest weights with one range of data held out
followed then by a calculation of the backtest expected return calculated on the held out data
this assign data method is fairly adhock"""
if opperation_type == "backtest_weights":
df1 = self.log_change_data
df2 = self.log_change_data[self.start:self.end]
self.data_window = pd.concat([df1, df2]).drop_duplicates(keep=False) # removes the window from the dataframe ie. hold one out
elif opperation_type == "windows":
self.data_window = self.log_change_data[self.start:self.end]
else:
self.data_window = self.log_change_data
def calculate_covariance_and_var(self):
self.cov_matrix = self.data_window.cov() * 12
self.var = pd.Series(np.diag(self.cov_matrix), index=[self.cov_matrix.columns])
def CAPM_prediction(self):
rf = self.risk_free_rate
b = self.var.drop(self.market_indecies)
Rm = self.market_returns_yr
self.CAPM = sum((rf + b*(Rm-rf).values) * (self.Wmp))/100 #CAPM times weights of market portfolio
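# Note: textbook CAPM would use each asset's beta here (E[R_i] = rf + beta_i*(E[R_m] - rf)); `b` above is
# taken from `self.var` (the diagonal of the covariance matrix), so the betas computed in calculate_beta /
# calculate_regress_params are not the ones entering this weighted sum.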
if self.plot_as_windows:
self.CAPMs.append(self.CAPM)
def calculate_beta(self): #can be done with linalg cov_matrix * var
#getting beta as covar/var
d = defaultdict(list)
for index in self.market_indecies:
var = self.cov_matrix.loc[index, index]
for tick in self.stock_ticks:
covar = self.cov_matrix.loc[index, tick]
d[index] += [covar/var]
self.beta1 = pd.DataFrame(data=d, index=self.stock_ticks)
def calculate_regress_params(self):
#getting alpha and beta with linear regression
a = defaultdict(list)
b = defaultdict(list)
for market in self.market_indecies:
for tick in self.stock_ticks:
slope, intercept, _, _, _ = linregress(self.data_window[tick], self.data_window[market])
a[market] += [intercept]
b[market] += [slope]
self.alfa = pd.DataFrame(data=a, index=self.stock_ticks)
self.beta = pd.DataFrame(data=b, index=self.stock_ticks)
def | (self):
        #Using plain mean value
self.market_returns = self.data_window[self.market_indecies].mean()
        #scaling to yearly using Euler's number (continuous compounding)
self.market_returns_yr = np.exp(self.market_returns*12)-1
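        # Worked example (added for clarity): a mean monthly log return of 0.01 annualises to
        # np.exp(0.01 * 12) - 1 = np.exp(0.12) - 1 ≈ 0.1275, i.e. roughly a 12.75% simple yearly return.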
def calculate_exp_return(self):
# #Using CAPM
# self.exp_return = self.risk_free_rate + (self.market_returns-self.risk_free_rate) * self.beta
        # Using plain mean value
self.exp_return = self.data_window[self.stock_ticks].mean()
self.exp_return_yr = np.exp(self.exp_return*12)-1
def solve_elements_for_plot(self):
"""Operations"""
def quad_var(W, C):
return np.sqrt(dot(dot(W.T, C), W)) # Quadratic expression to calculate portfolio risk
def exp_return(W, R):
            return np.dot(W.T, R).sum() # Expected portfolio return
def exp_return1(W, R, rf):
return rf + np.dot(W.T, (R-rf)).sum()
def sharpe_ratio(Fr, Vf, rf):
# returns Sr
return (Fr - rf) / Vf
def CML(Vf, rf, Sr):
# risk_free_rate + risk * Sharpe_ratio_of_the_market_portfolio
return Vf * Sr + rf
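        # Worked example of the helpers above (illustrative numbers only): with rf = 0.02,
        # a portfolio return Fr = 0.08 and risk Vf = 0.10 give
        #   sharpe_ratio(0.08, 0.10, 0.02) = (0.08 - 0.02) / 0.10 = 0.6
        # and the capital market line at risk 0.05 gives CML(0.05, 0.02, 0.6) = 0.05 * 0.6 + 0.02 = 0.05.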
def qsolve(R, Fr, C):
"""
where:
R is the vector of expected returns
Fr is the range expected returns on the EFF
C is the var-covariance matrix
"""
# TODO: add options to short and borrow
            W = R*0 + 1/len(R) #Initialize equal percentage weights
#Bounds (inequality constraints)
            b = [(0.,1.) for i in W] # weights between 0% and 100% - no borrowing, no shorting
def fitness(W, C, r):
Pv = quad_var(W, C)
return Pv
Vf, Wf = [], []
# Equality constraints
h = ({'type':'eq', 'fun': lambda W: sum(W)-1.}, # Sum of weights = 100%
{'type':'eq', 'fun': lambda W: exp_return(W, R) - r}) # equalizes portfolio return to r
for r in Fr:
                # For a given level of return r, find the weights which minimize portfolio variance.
optimized = minimize(fitness, W, args=(C, r), method='SLSQP', #Sequential Least SQuares Programming
constraints=h, bounds=b)
X = optimized.x
Wf.append(X)
Vx = quad_var(X,C)
Vf.append(Vx)
return Vf, Wf
R = self.exp_return_yr.values
Fr = np.linspace(min(R), max(R), num=100)
C = self.cov_matrix.iloc[:-1,:-1].values #cov matrix without market index
Vf, Wf = qsolve(R, Fr, C)
rf = self.risk_free_rate
self.EFFsr = sharpe_ratio(Fr, Vf, rf) #sharpe ratio for portfolios on the eficient frontier
# FRONTIER
self.frontier_exp_return = Fr #Y axis of EFF
self.frontier_risk = Vf #X axis of EFF
self.frontier_weights = [[round(w*100,2) for w in ws] for ws in Wf] #TODO might be done directly in pandas
#MARKET PORTFOLIO
idxmax = np.argmax(self.EFFsr) # index of "market" portfolio
MPsr = self.EFFsr[idxmax] # sharpe ratio of "market" portfolio ie. slope of CML
self.Wmp = self.frontier_weights[idxmax]# weights of market portfolio
self.marketPx = Vf[idxmax] # "market" portfolio x and y
self.marketPy = Fr[idxmax]
#MINIMUM RISK PORTFOLIO
idxmin = self.idxmin = np.argmin(Vf) # index of minimum risk portfolio
self.minriskPx = Vf[idxmin]
self.minriskPy = Fr[idxmin]
if self.plot_CML:
#CAPITAL MARKET LINE
self.CMLx = np.linspace(0, max(Vf), num=100)
self.CMLy = [CML(x, rf, MPsr) for x in self.CMLx]
def qsolve1(CMLy, CMLx, C, R): #TODO: make 1 Qsolver with intuitive changeable constraints
                W = R*0 + 1/len(R) #Initialize equal percentage weights
rf = self.risk_free_rate
#Bounds (inequality constraints)
                b = [(0.,2.) for i in W] # weights between 0% and 200% - allows leveraged positions, no shorting
def fitness(W, x, y, rf, ri, re):
Pv = sharpe_ratio(x, y, rf)
return - Pv.sum()
Vf, Wf = [], []
# Equality constraints
h = ({'type':'eq', 'fun': lambda W: sum(W)-1.}, # Sum of weights = 100%
{'type':'eq', 'fun': lambda W: quad_var(W, C) - ri}, # equalizes portfolio risk to ri
{'type':'eq', 'fun': lambda W: exp_return1(W, R, rf) - re}) # equalizes portfolio return to re
for ri, re in zip(CMLx, CMLy):
                    # For a given level of return r, find the weights which minimize portfolio variance.
optimized = minimize(fitness, W, args=(CMLx, CMLy, rf, ri, re), method='SLSQP', #Sequential Least SQuares Programming
constraints=h, bounds=b)
X = optimized.x
Wf.append(X)
Vx = quad_var(X,C)
Vf.append(Vx)
return Vf, Wf
R1 = self.exp_return_yr
R1["Rf"] = self.risk_free_rate
R1 = R1.values
C1 = self.cov_matrix.iloc[:-1,:-1]
C1["Rf"] = 0.0
C1.loc["Rf"] = 0.0
C1 = C1.values
_, Wcml = qsolve1(self.CMLy, self.CMLx, C1, R1)
self.CML_weights = [[round(w*100,2) for w in ws] for ws in Wcml]
# | calculate_expected_market_return | identifier_name |
my.js | (),
_h = isWap && _w<h?$win.height():_w * 1135 / 720;
$wrapers.height(_h);
if($win.height()<300){
$(".cn-slidetips").hide();
}else{
$(".cn-slidetips").show();
}
};
sl();
$win.resize(sl);
};
    //slide-change binding function
var onSlideChangeTime = 0;
var onSlideChange = function () {
if (onSlideChangeTime>1) {
return;
}
var index = mySwiper.activeIndex;
if (nowIndex == index && mySwiper.touches['abs'] < 50) {
return;
}
onSlideChangeTime = 20;
setAms();
nowIndex = index || 0;
//history.pushState(null, null, "index.html?p=" + (nowIndex + 1));
        //run the animation
var timer=setInterval(function () {
onSlideChangeTime -= 1;
if (onSlideChangeTime == 0) {
clearInterval(timer);
}
},1);
};
    //touch-end binding
var onTouchEnd = function () {
var index = mySwiper.index;
if (nowIndex == slideCount-1 && +mySwiper.touches['diff'] <-50) {
return mySwiper.swipeTo(0);
}
};
    //slide-end binding
var onSlideChangeEnd = function () {
if(mySwiper.activeIndex==9){
$("#ewm").show();
}else{
$("#ewm").hide();
}
$(".swiper-slide").eq(mySwiper.activeIndex).find("img").each(function(){
if($(this).attr("date-src")){
$(this).attr("src",$(this).attr("date-src"));
}
});
onSlideChange();
};
    //main function that binds the swiper
var bindSwiper = function () {
mySwiper = $swiperContainer.swiper({
onTouchEnd: onTouchEnd,
onSlideChangeEnd: onSlideChangeEnd,
//mousewheelControl:true,
mode: 'vertical'
});
};
    //scroll to the next screen
var bindNext = function () {
$(".next").on("click", function () {
mySwiper.activeIndex = mySwiper.activeIndex || 0;
var index = mySwiper.activeIndex == slideCount - 1 ? 0 : (mySwiper.activeIndex||0) + 1;
mySwiper.swipeTo(index);
});
};
    //initialize
bindSwiper();
bindNext();
setLayout();
setAms();
};
//initialization
yt.init = function () {
window.onload = function () {
$("#loading").hide();
setTimeout(yt.app);
        //set the number of lottery draws allowed
$("#count").val(2);
$("#zdxz").val(1);
};
};
yt.init();
/***************************************/
function jpqh(type){
if(type=='left'){
if($('.jp img').attr('zdy')=='jp1')
{
$('.jp img').attr('src','images/jp2.png?vid=1.0');
$('.jp img').attr('zdy','jp2');
}
else{
$('.jp img').attr('src','images/jp1.png');
$('.jp img').attr('zdy','jp1');
}
}
else{
if($('.jp img').attr('zdy')=='jp1')
{
$('.jp img').attr('src','images/jp2.png?vid=1.0');
$('.jp img').attr('zdy','jp2');
}
else{
$('.jp img').attr('src','images/jp1.png');
$('.jp img').attr('zdy','jp1');
}
}
}
function qie(obj){
if($(obj).text()=='活动规则'){
        //activity rules:
$("#jpgz").css('display','inline');
$("#jpzs").css('display','none');
$('#my2').css('background-color','#e1e1e1');
$('#my1').css('background-color','#ffffff');
}
else{
        //prize display
$("#jpgz").css('display','none');
$("#jpzs").css('display','inline');
$('#my1').css('background-color','#e1e1e1');
$('#my2').css('background-color','#ffffff');
}
}
// no draws left
function cs(){
var _tan=$(".tan");
var _bg = $(".bg");
_bg.fadeIn();
_tan.fadeIn();
_bg.click(function(){
_bg.fadeOut();
_tan.fadeOut();
});
}
$('#fximg').c | ck(function(){
var _fxbg=$('.fxbg');
var _tan=$(".tan");
var _bg = $(".bg");
_fxbg.fadeIn();
_bg.fadeOut();
_tan.fadeOut();
})
//share succeeded
function fxsuccess(){
var _fxbg=$(".fxbg");
_fxbg.fadeIn();
}
$("#zp").click(function(){
var type=0;
var count=$("#count").val();
if($("#zdxz").val()==0){
// alert($("#zdxz").val());
return false;
}
var endTag = true;
    //activity has ended
//tanprd('<p>活动结束后</p><p>会有专人通知您的了!</p>');
// if(count<1){
// cs('');
// return;
// }
$.ajax({
async:false,
url:'server.php',
data:{act:'start'},
type:'post',
dataType:'json',
success:function(result){
if(result.errcode!=0)
{
if(result.errcode == 1003)
{
cs();
}
else
{
common('<p>系统提示</p>','<p>'+result.errmsg+'</p>');
}
endTag = false;
}
else
{
type=result.prize;
}
}
});
if(!endTag)
{
return false;
}
switch (type) {
case 1:
rotateFunc(1,0,'箭牌智能坐便器AKB1130');
count--;
break;
case 2:
rotateFunc(0,60,'谢谢参与');
count--;
break;
case 3:
rotateFunc(2,120,'箭牌卫浴智能马桶盖AK1002');
count--;
break;
case 4:
rotateFunc(0,180,'');
count--;
break;
case 5:
rotateFunc(3,240,'箭牌卫浴品牌优质毛巾');
count--;
break;
default:
rotateFunc(0,300,'');
count--;
}
$("#count").val(count);
$("#zdxz").val(0);
});
var rotateFunc = function(awards,angle,text){ //awards: prize tier, angle: the wheel angle for that prize
var _zdpic=$('#zd');
_zdpic.stopRotate();
_zdpic.rotate({
angle: 0,
duration: 4000,
        animateTo: angle + 1800, //angle is the prize's position on the wheel image; the extra 1800 degrees spins the pointer five full turns before it stops
callback: function(){
tanprd(awards);
}
});
};
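// Example (added for illustration): a prize that sits at 120 degrees on the wheel image is passed
// as rotateFunc(2, 120, ...), so the wheel animates to 120 + 1800 = 1920 degrees - five full turns
// plus 120 degrees - before the tanprd() callback reveals the result.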
setInterval(function(){
$("#zdxz").val(1);
},8000);
function tanprd(type){
var _title=$("#tanprd2 .product-tilte");
var _con=$("#tanprd2 .product-con");
var _btn=$("#tanprd2 .product-btn");
//var _countcon=$("#tanprd2 .product-count");
//var count=1;//剩余抽奖次数
var _product=$('#tanprd2');
var _bg = $(".bg");
switch(type){
case 1:
_product.fadeIn();
_bg.fadeIn();
_title.html('<p>恭喜您!</p>');
_con.html('<p>获得了特等奖</p><p>箭牌智能坐便器</p><p>AKB1130!</p>');
_btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'1\')">');
//_countcon.html('<p>您还剩有<font>'+count+'</font>次抽奖机会!</p>');
break;
case 2:
_product.fadeIn();
_bg.fadeIn();
_title.html('<p>恭喜您!</p>');
_con.html('<p>获得了优秀奖</p><p>箭牌卫浴智能马桶盖</p><p>AK1002!</p>');
_btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'2\')">');
//_countcon.html('<p>您还剩有<font>'+count+'</font>次抽奖机会!</p>');
break;
| li | identifier_name |
my.js | isWap=yt.isWap(),
w = 720,
h = 1135;
var sl = function () {
var _w = $wraper1.width(),
h = $win.height(),
_h = isWap && _w<h?$win.height():_w * 1135 / 720;
$wrapers.height(_h);
if($win.height()<300){
$(".cn-slidetips").hide();
}else{
$(".cn-slidetips").show();
}
};
sl();
$win.resize(sl);
};
    //slide-change binding function
var onSlideChangeTime = 0;
var onSlideChange = function () {
if (onSlideChangeTime>1) {
return;
}
var index = mySwiper.activeIndex;
if (nowIndex == index && mySwiper.touches['abs'] < 50) {
return;
}
onSlideChangeTime = 20;
setAms();
nowIndex = index || 0;
//history.pushState(null, null, "index.html?p=" + (nowIndex + 1));
        //run the animation
var timer=setInterval(function () {
onSlideChangeTime -= 1;
if (onSlideChangeTime == 0) {
clearInterval(timer);
}
},1);
};
    //touch-end binding
var onTouchEnd = function () {
var index = mySwiper.index;
if (nowIndex == slideCount-1 && +mySwiper.touches['diff'] <-50) {
return mySwiper.swipeTo(0);
}
};
    //slide-end binding
var onSlideChangeEnd = function () {
if(mySwiper.activeIndex==9){
$("#ewm").show();
}else{
$("#ewm").hide();
}
$(".swiper-slide").eq(mySwiper.activeIndex).find("img").each(function(){
if($(this).attr("date-src")){
$(this).attr("src",$(this).attr("date-src"));
}
});
onSlideChange();
};
    //main function that binds the swiper
var bindSwiper = function () {
mySwiper = $swiperContainer.swiper({
onTouchEnd: onTouchEnd,
onSlideChangeEnd: onSlideChangeEnd,
//mousewheelControl:true,
mode: 'vertical'
});
};
    //scroll to the next screen
var bindNext = function () {
$(".next").on("click", function () {
mySwiper.activeIndex = mySwiper.activeIndex || 0;
var index = mySwiper.activeIndex == slideCount - 1 ? 0 : (mySwiper.activeIndex||0) + 1;
mySwiper.swipeTo(index);
});
};
    //initialize
bindSwiper();
bindNext();
setLayout();
setAms();
};
//initialization
yt.init = function () {
window.onload = function () {
$("#loading").hide();
setTimeout(yt.app);
        //set the number of lottery draws allowed
$("#count").val(2);
$("#zdxz").val(1);
};
};
yt.init();
/***************************************/
function jpqh(type){
if(type=='left'){
if($('.jp img').attr('zdy')=='jp1')
{
$('.jp img').attr('src','images/jp2.png?vid=1.0');
$('.jp img').attr('zdy','jp2');
}
else{
$('.jp img').attr('src','images/jp1.png');
$('.jp img').attr('zdy','jp1');
}
}
else{
if($('.jp img').attr('zdy')=='jp1')
{
$('.jp img').attr('src','images/jp2.png?vid=1.0');
$('.jp img').attr('zdy','jp2');
}
else{
$('.jp img').attr('src','images/jp1.png');
$('.jp img').attr('zdy','jp1');
}
}
}
function qie(obj){
if($(obj).text()=='活动规则'){
        //activity rules:
$("#jpgz").css('display','inline');
$("#jpzs").css('display','none');
$('#my2').css('background-color','#e1e1e1');
$('#my1').css('background-color','#ffffff');
}
else{
        //prize display
$("#jpgz").css('display','none');
$("#jpzs").css('display','inline');
$('#my1').css('background-color','#e1e1e1');
$('#my2').css('background-color','#ffffff');
}
}
// no draws left
function cs(){
var _tan=$(".tan");
var _bg = $(".bg");
_bg.fadeIn();
_tan.fadeIn();
_bg.click(function(){
_bg.fadeOut();
_tan.fadeOut();
});
}
$('#fximg').click(function(){
var _fxbg=$('.fxbg');
var _tan=$(".tan");
var _bg = $(".bg");
_fxbg.fadeIn();
_bg.fadeOut();
_tan.fadeOut();
})
//share succeeded
function fxsuccess(){
var _fxbg=$(".fxbg");
_fxbg.fadeIn();
}
$("#zp").click(function(){
var type=0;
var count=$("#count").val();
if($("#zdxz").val()==0){
// alert($("#zdxz").val());
return false;
}
var endTag = true;
    //activity has ended
//tanprd('<p>活动结束后</p><p>会有专人通知您的了!</p>');
// if(count<1){
// cs('');
// return;
// }
$.ajax({
async:false,
url:'server.php',
data:{act:'start'},
type:'post',
dataType:'json',
success:function(result){
if(result.errcode!=0)
{
if(result.errcode == 1003)
{
cs();
}
else
{
common('<p>系统提示</p>','<p>'+result.errmsg+'</p>');
}
endTag = false;
}
else
{
type=result.prize;
}
}
});
if(!endTag)
{
return false;
}
switch (type) {
case 1:
rotateFunc(1,0,'箭牌智能坐便器AKB1130');
count--;
break;
case 2:
rotateFunc(0,60,'谢谢参与');
count--;
break;
case 3:
rotateFunc(2,120,'箭牌卫浴智能马桶盖AK1002');
count--;
break;
case 4:
rotateFunc(0,180,'');
count--;
break;
case 5:
rotateFunc(3,240,'箭牌卫浴品牌优质毛巾');
count--;
break;
default:
rotateFunc(0,300,'');
count--;
}
$("#count").val(count);
$("#zdxz").val(0);
});
var rotateFunc = function(awards,angle,text){ //awards: prize tier, angle: the wheel angle for that prize
var _zdpic=$('#zd');
_zdpic.stopRotate();
_zdpic.rotate({
angle: 0,
duration: 4000,
        animateTo: angle + 1800, //angle is the prize's position on the wheel image; the extra 1800 degrees spins the pointer five full turns before it stops
callback: function(){
tanprd(awards);
}
});
};
setInterval(function(){
$("#zdxz").val(1);
},8000);
function tanprd(type){
var _title=$("#tanprd2 .product-tilte");
var _con=$("#tanprd2 .product-con");
var _btn=$("#tanprd2 .product-btn");
//var _countcon=$("#tanprd2 .product-count");
//var count=1;//剩余抽奖次数
var _product=$('#tanprd2');
var _bg = $(".bg");
switch(type){
case 1:
_product.fadeIn();
_bg.fadeIn();
_title.html('<p>恭喜您!</p>');
_con.html('<p>获得了特等奖</p><p>箭牌智能坐便器</p><p>AKB1130!</p>');
_btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'1\')">');
//_countcon.html('<p>您还剩有<font>'+count+'</font>次抽奖机会!</p>');
break;
case 2:
_product.fadeIn();
_bg.fadeIn();
_title.html('<p>恭喜您!</p>');
_con.html('<p>获得了优秀奖</p><p>箭牌卫浴智能马桶盖</p><p>AK1002!</p>');
_btn.html('<img | random_line_split |
||
my.js | lse;
};
//swipe binding
yt.app = function () {
var $swiperContainer = $("#swiper-container1"),
$pages = $("#wrapper").children(),
$as = $("#nav li a"),
$lis = $("#nav li"),
$win =$(window),
slideCount = $pages.length,
nowIndex = 0,
acn = "animation",
mySwiper;
var params = {
selectorClassName: "swiper-container",
animationClassName: acn,
animationElm: $("." + acn)
};
var setCssText = function (prop, value) {
return prop + ': ' + value + '; ';
};
/*
* insertCss(rule)
    * inserts the given css rule at the bottom of the document <head>
    * rule: the css text to pass in
* */
var insertCss = function (rule) {
var head = document.head || document.getElementsByTagName('head')[0],
style;
if (!!head.getElementsByTagName('style').length) {
style = head.getElementsByTagName('style')[0];
if (style.styleSheet) {
style.styleSheet.cssText = rule;
} else {
style.innerHTML = '';
style.appendChild(document.createTextNode(rule));
}
} else {
style = document.createElement('style');
style.type = 'text/css';
if (style.styleSheet) {
style.styleSheet.cssText = rule;
} else {
style.appendChild(document.createTextNode(rule));
}
head.appendChild(style);
}
};
var setAnimationStyle=function() {
var cssText = '';
cssText += '.' + params.animationClassName + '{' +
setCssText('display', 'none') +
'}' +
'.touchstart .' + params.animationClassName + '{' +
setCssText('-webkit-animation-duration', '0 !important') +
setCssText('-webkit-animation-delay', '0 !important') +
setCssText('-webkit-animation-iteration-count', '1 !important') +
'}';
var index = mySwiper.activeIndex,
_index = index + 1,
$ans = $pages.eq(index).find('.' + params.animationClassName);
$ans.each(function () {
var obj = $(this);
_className = obj.attr('data-item'),
_animation = obj.attr('data-animation'),
_duration = ((obj.attr('data-duration') / 1000) || 1) + 's',
_timing = obj.attr('data-timing-function') || 'ease',
_delay = ((obj.attr('data-delay') || 0) / 1000) + 's',
_count = obj.attr('data-iteration-count') || 1;
var _t = '.' + params.selectorClassName +
' .page-' + _index +
' .' + _className;
cssText += _t + '{' +
setCssText('display', 'block !important') +
setCssText('-webkit-animation-name', _animation) +
setCssText('-webkit-animation-duration', _duration) +
setCssText('-webkit-animation-timing-function', _timing) +
setCssText('-webkit-animation-delay', _delay) +
setCssText('-webkit-animation-fill-mode', 'both') +
setCssText('-webkit-animation-iteration-count', _count) +
'}';
});
return cssText;
};
    //set up the animations
var setAms = function () {
insertCss(setAnimationStyle());
};
    //set the layout
var setLayout = function () {
var $wrapers = $("#swiper-container1 .wraper"),
$wraper1 = $("#wraper1"),
isWap=yt.isWap(),
w = 720,
h = 1135;
var sl = function () {
var _w = $wraper1.width(),
h = $win.height(),
_h = isWap && _w<h?$win.height():_w * 1135 / 720;
$wrapers.height(_h);
if($win.height()<300){
$(".cn-slidetips").hide();
}else{
$(".cn-slidetips").show();
}
};
sl();
$win.resize(sl);
};
    //slide-change binding function
var onSlideChangeTime = 0;
var onSlideChange = function () {
if (onSlideChangeTime>1) {
return;
}
var index = mySwiper.activeIndex;
if (nowIndex == index && mySwiper.touches['abs'] < 50) {
return;
}
onSlideChangeTime = 20;
setAms();
nowIndex = index || 0;
//history.pushState(null, null, "index.html?p=" + (nowIndex + 1));
        //run the animation
var timer=setInterval(function () {
onSlideChangeTime -= 1;
if (onSlideChangeTime == 0) {
clearInterval(timer);
}
},1);
};
    //touch-end binding
var onTouchEnd = function () {
var index = mySwiper.index;
if (nowIndex == slideCount-1 && +mySwiper.touches['diff'] <-50) {
return mySwiper.swipeTo(0);
}
};
    //slide-end binding
var onSlideChangeEnd = function () {
if(mySwiper.activeIndex==9){
$("#ewm").show();
}else{
$("#ewm").hide();
}
$(".swiper-slide").eq(mySwiper.activeIndex).find("img").each(function(){
if($(this).attr("date-src")){
$(this).attr("src",$(this).attr("date-src"));
}
});
onSlideChange();
};
    //main function that binds the swiper
var bindSwiper = function () {
mySwiper = $swiperContainer.swiper({
onTouchEnd: onTouchEnd,
onSlideChangeEnd: onSlideChangeEnd,
//mousewheelControl:true,
mode: 'vertical'
});
};
    //scroll to the next screen
var bindNext = function () {
$(".next").on("click", function () {
mySwiper.activeIndex = mySwiper.activeIndex || 0;
var index = mySwiper.activeIndex == slideCount - 1 ? 0 : (mySwiper.activeIndex||0) + 1;
mySwiper.swipeTo(index);
});
};
    //initialize
bindSwiper();
bindNext();
setLayout();
setAms();
};
//initialization
yt.init = function () {
window.onload = function () {
$("#loading").hide();
setTimeout(yt.app);
        //set the number of lottery draws allowed
$("#count").val(2);
$("#zdxz").val(1);
};
};
yt.init();
/***************************************/
function jpqh(type){
if(type=='left'){
if($('.jp img').attr('zdy')=='jp1')
{
$('.jp img').attr('src','images/jp2.png?vid=1.0');
$('.jp img').attr('zdy','jp2');
}
else{
$('.jp img').attr('src','images/jp1.png');
$('.jp img').attr('zdy','jp1');
}
}
else{
if($('.jp img').attr('zdy')=='jp1')
{
$('.jp img').attr('src','images/jp2.png?vid=1.0');
$('.jp img').attr('zdy','jp2');
}
else{
$('.jp img').attr('src','images/jp1.png');
$('.jp img').attr('zdy','jp1');
}
}
}
function qie(obj){
if($(obj).text()=='活动规则'){
        //activity rules:
$("#jpgz").css('display','inline');
$("#jpzs").css('display','none');
$('#my2').css('background-color','#e1e1e1');
$('#my1').css('background-color','#ffffff');
}
else{
        //prize display
$("#jpgz").css('display','none');
$("#jpzs").css('display','inline');
$('#my1').css('background-color','#e1e1e1');
$('#my2').css('background-color','#ffffff');
}
}
// no draws left
function cs(){
var _tan=$(".tan");
var _bg = $(".bg");
_bg.fadeIn();
_tan.fadeIn();
_bg.click(function(){
_bg.fadeOut();
_tan.fadeOut();
});
}
$('#fximg').click(function(){
var _fxbg=$('.fxbg');
var _tan=$(".tan");
var _bg = $(".bg");
_fxbg.fadeIn();
_bg.fadeOut();
_tan.fadeOut();
})
//share succeeded
function fxsuccess(){
var _fxbg=$(".fxbg");
_fxbg.fadeIn();
}
$("#zp").click(function(){
var type=0;
var count=$("#count").val();
if($("#zdxz").val()==0){
// alert($("#zdxz").val());
return false;
}
| }
return fa | conditional_block |
|
my.js | zdy','jp1');
}
}
else{
if($('.jp img').attr('zdy')=='jp1')
{
$('.jp img').attr('src','images/jp2.png?vid=1.0');
$('.jp img').attr('zdy','jp2');
}
else{
$('.jp img').attr('src','images/jp1.png');
$('.jp img').attr('zdy','jp1');
}
}
}
function qie(obj){
if($(obj).text()=='活动规则'){
        //activity rules:
$("#jpgz").css('display','inline');
$("#jpzs").css('display','none');
$('#my2').css('background-color','#e1e1e1');
$('#my1').css('background-color','#ffffff');
}
else{
        //prize display
$("#jpgz").css('display','none');
$("#jpzs").css('display','inline');
$('#my1').css('background-color','#e1e1e1');
$('#my2').css('background-color','#ffffff');
}
}
// no draws left
function cs(){
var _tan=$(".tan");
var _bg = $(".bg");
_bg.fadeIn();
_tan.fadeIn();
_bg.click(function(){
_bg.fadeOut();
_tan.fadeOut();
});
}
$('#fximg').click(function(){
var _fxbg=$('.fxbg');
var _tan=$(".tan");
var _bg = $(".bg");
_fxbg.fadeIn();
_bg.fadeOut();
_tan.fadeOut();
})
//share succeeded
function fxsuccess(){
var _fxbg=$(".fxbg");
_fxbg.fadeIn();
}
$("#zp").click(function(){
var type=0;
var count=$("#count").val();
if($("#zdxz").val()==0){
// alert($("#zdxz").val());
return false;
}
var endTag = true;
    //activity has ended
//tanprd('<p>活动结束后</p><p>会有专人通知您的了!</p>');
// if(count<1){
// cs('');
// return;
// }
$.ajax({
async:false,
url:'server.php',
data:{act:'start'},
type:'post',
dataType:'json',
success:function(result){
if(result.errcode!=0)
{
if(result.errcode == 1003)
{
cs();
}
else
{
common('<p>系统提示</p>','<p>'+result.errmsg+'</p>');
}
endTag = false;
}
else
{
type=result.prize;
}
}
});
if(!endTag)
{
return false;
}
switch (type) {
case 1:
rotateFunc(1,0,'箭牌智能坐便器AKB1130');
count--;
break;
case 2:
rotateFunc(0,60,'谢谢参与');
count--;
break;
case 3:
rotateFunc(2,120,'箭牌卫浴智能马桶盖AK1002');
count--;
break;
case 4:
rotateFunc(0,180,'');
count--;
break;
case 5:
rotateFunc(3,240,'箭牌卫浴品牌优质毛巾');
count--;
break;
default:
rotateFunc(0,300,'');
count--;
}
$("#count").val(count);
$("#zdxz").val(0);
});
var rotateFunc = function(awards,angle,text){ //awards: prize tier, angle: the wheel angle for that prize
var _zdpic=$('#zd');
_zdpic.stopRotate();
_zdpic.rotate({
angle: 0,
duration: 4000,
        animateTo: angle + 1800, //angle is the prize's position on the wheel image; the extra 1800 degrees spins the pointer five full turns before it stops
callback: function(){
tanprd(awards);
}
});
};
setInterval(function(){
$("#zdxz").val(1);
},8000);
function tanprd(type){
var _title=$("#tanprd2 .product-tilte");
var _con=$("#tanprd2 .product-con");
var _btn=$("#tanprd2 .product-btn");
//var _countcon=$("#tanprd2 .product-count");
//var count=1;//剩余抽奖次数
var _product=$('#tanprd2');
var _bg = $(".bg");
switch(type){
case 1:
_product.fadeIn();
_bg.fadeIn();
_title.html('<p>恭喜您!</p>');
_con.html('<p>获得了特等奖</p><p>箭牌智能坐便器</p><p>AKB1130!</p>');
_btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'1\')">');
//_countcon.html('<p>您还剩有<font>'+count+'</font>次抽奖机会!</p>');
break;
case 2:
_product.fadeIn();
_bg.fadeIn();
_title.html('<p>恭喜您!</p>');
_con.html('<p>获得了优秀奖</p><p>箭牌卫浴智能马桶盖</p><p>AK1002!</p>');
_btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'2\')">');
//_countcon.html('<p>您还剩有<font>'+count+'</font>次抽奖机会!</p>');
break;
case 3:
_product.fadeIn();
_bg.fadeIn();
_title.html('<p>恭喜您!</p>');
_con.html('<p>获得了参与奖</p><p>箭牌卫浴品牌优质毛巾</p>');
_btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'3\')">');
//_countcon.html('<p>您还剩有<font>'+count+'</font>次抽奖机会!</p>');
break;
case 0:
cs();
// _title.html('<p>很遗憾。</p>');
// _con.html('<p>感谢您的参与,下次中</p><p>奖机会一定属于您!</p>');
// _btn.html('<a href="index.php"><img src="images/btn4.png" width="50%"></a>');
// _bg.click(function(){
// _bg.fadeOut();
// _product.fadeOut();
// });
break;
default:
}
}
//common popup dialog
function common(title,text){
var _title=$("#tanprd .product-tilte");
var _con=$("#tanprd .product-con");
var _btn=$("#tanprd .product-btn");
var _product=$('#tanprd');
var _bg = $(".bg");
_product.fadeIn();
_bg.fadeIn();
_title.html(title);
_con.html(text);
_btn.html('<a id="combtn"><img src="images/btn2.png" width="50%"></a>');
// _bg.click(function(){
// _bg.fadeOut();
// _product.fadeOut();
// });
$("#combtn").click(function(){
_bg.fadeOut();
_product.fadeOut();
});
}
//fill in personal information
function message(type){
    var _type=type;//prize tier
var _msg=$('#messageid');
var _bg = $(".bg");
var _product=$('#tanprd2');
_product.fadeOut();
_msg.fadeIn();
_bg.fadeIn();
}
function zh(obj){
var _msg=$('#messageid');
// var _product=$('#tanprd');
// var _bg = $(".bg");
var errTag=true;
var name = $("#name").val();
var phone = $("#phone").val();
var address = $("#address").val();
console.log(name);
$.ajax({
async:false,
url:'server.php',
data:{act:'addinfo',name:name,phone:phone,address:address},
type:'post',
dataType:'json',
success:function(result){
if(result.errcode!=0)
{
alert(result.errmsg);
errTag = false;
}
else
{
return false;
}
}
});
if(!errTag)
{
return false;
}
_msg.fadeOut();
//_product.fadeIn();
common('<p | >谢谢您参与!</p>','<p>活动结束后</p><p>会有专人通知您的了!</p>')
// $("#end").click(function(){
// _bg.fadeOut();
// _product.fadeOut();
// });
}
function producttag(type){
var _bg = $(".bg");
var _tag=$("#tag");
_bg.fadeIn();
_tag.fadeIn();
var _tagpic=$("#tag .prd-tag-pic");
var _tagtitle=$("#tag .prd-tag-title");
var _tagcon=$("#tag .prd-tag-con");
var _tagtype=$("#tag .prd-tag-type"); | identifier_body |
|
client_dns.go | string) (string, error) {
domains, err := d.getDomainsWithCache(ctx)
if err != nil {
return "", err
}
domain, ok := domains[domainId]
if !ok {
return "", fmt.Errorf("DNS domain with id %s not found", domainId)
}
return CompositeDomainName(domain.DomainName, domain.DomainId), nil
}
// CreateOrUpdateDomainRecords creates or updates the domain records with the given domain name, name, record type,
// values, and ttl.
// * For each element in values that has an existing domain record, the existing record is updated if needed.
// * For each element in values that doesn't have an existing domain record, a new domain record is created.
// * For each existing domain record that doesn't have a corresponding element in values, the existing record is deleted.
func (d *dnsClient) CreateOrUpdateDomainRecords(ctx context.Context, domainName, name, recordType string, values []string, ttl int64) error {
domainName, _ = DomainNameAndId(domainName)
rr, err := getRR(name, domainName)
if err != nil {
return err
}
records, err := d.getDomainRecords(ctx, domainName, rr, recordType)
if err != nil {
return err
}
for _, value := range values {
if record, ok := records[value]; ok {
// Only update the existing domain record if the current TTL value is different from the given one
// At this point we know that rr, recordType, and value are the same
if record.TTL != ttl {
if err := d.updateDomainRecord(ctx, record.RecordId, rr, recordType, value, ttl); err != nil {
return err
}
}
} else {
if err := d.createDomainRecord(ctx, domainName, rr, recordType, value, ttl); err != nil {
return err
}
}
}
for value, record := range records {
if !utils.ValueExists(value, values) {
if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
return err
}
}
}
return nil
}
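// Illustrative walk-through (added; not part of the original source): for name "www.example.com" in
// domain "example.com" with recordType "A", existing records {1.1.1.1, 2.2.2.2} and desired values
// []string{"1.1.1.1", "3.3.3.3"}, the loop above keeps 1.1.1.1 (updating it only if its TTL differs),
// creates 3.3.3.3, and deletes 2.2.2.2, so the record set converges to exactly the desired values.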
// DeleteDomainRecords deletes the domain records with the given domain name, name and record type.
func (d *dnsClient) DeleteDomainRecords(ctx context.Context, domainName, name, recordType string) error {
domainName, _ = DomainNameAndId(domainName)
rr, err := getRR(name, domainName)
if err != nil {
return err
}
records, err := d.getDomainRecords(ctx, domainName, rr, recordType)
if err != nil {
return err
}
for _, record := range records {
if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
return err
}
}
return nil
}
func (d *dnsClient) getDomainsWithCache(ctx context.Context) (map[string]alidns.Domain, error) {
// cache.Expiring Get and Set methods are concurrency-safe.
// However, if an accessKeyID is not present in the cache and multiple DNSRecords are reconciled at the same time,
// it may happen that getDomains is called multiple times instead of just one, so use a mutex to guard against this.
	// It is ok to use a shared mutex here as long as the number of accessKeyIDs using custom domains is low.
// This may need to be revisited with a larger number of such accessKeyIDs to avoid them blocking each other
// during the (potentially long-running) call to getDomains.
d.domainsCacheMutex.Lock()
defer d.domainsCacheMutex.Unlock()
if v, ok := d.domainsCache.Get(d.accessKeyID); ok {
return v.(map[string]alidns.Domain), nil
}
domains, err := d.getDomains(ctx)
if err != nil {
return nil, err
}
d.domainsCache.Set(d.accessKeyID, domains, domainsCacheTTL)
return domains, nil
}
// getDomains returns all domains.
func (d *dnsClient) getDomains(ctx context.Context) (map[string]alidns.Domain, error) {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return nil, err
}
domains := make(map[string]alidns.Domain)
pageSize, pageNumber := 20, 1
req := alidns.CreateDescribeDomainsRequest()
req.PageSize = requests.NewInteger(pageSize)
for {
req.PageNumber = requests.NewInteger(pageNumber)
resp, err := d.Client.DescribeDomains(req)
if err != nil {
return nil, err
}
for _, domain := range resp.Domains.Domain {
domains[domain.DomainId] = getDomainFromResponse(domain)
}
if resp.PageNumber*int64(pageSize) >= resp.TotalCount {
break
}
pageNumber++
}
return domains, nil
}
func getDomainFromResponse(domainFromResp alidns.DomainInDescribeDomains) alidns.Domain {
return alidns.Domain{
DomainId: domainFromResp.DomainId,
DomainName: domainFromResp.DomainName,
AliDomain: domainFromResp.AliDomain,
CreateTimestamp: domainFromResp.CreateTimestamp,
//ExpireTimestamp: ,
InstanceEndTime: domainFromResp.InstanceEndTime,
CreateTime: domainFromResp.CreateTime,
// SourceProtocol: domainFromResp.SourceProtocol,
GroupName: domainFromResp.GroupName,
VersionCode: domainFromResp.VersionCode,
// UpdateTimestamp: domainFromResp.UpdateTimestamp,
RecordCount: domainFromResp.RecordCount,
InstanceExpired: domainFromResp.InstanceExpired,
ResourceGroupId: domainFromResp.ResourceGroupId,
// CacheTtlMin: domainFromResp.CacheTtlMin,
InstanceId: domainFromResp.InstanceId,
//ExpireTime:,
GroupId: domainFromResp.GroupId,
// SourceEdns: domainFromResp.SourceEdns,
RegistrantEmail: domainFromResp.RegistrantEmail,
VersionName: domainFromResp.VersionName,
// UpdateTime: domainFromResp.UpdateTime,
Remark: domainFromResp.Remark,
// CacheTtlMax: domainFromResp.CacheTtlMax,
PunyCode: domainFromResp.PunyCode,
Starmark: domainFromResp.Starmark,
// DnsServers: domainFromResp.DnsServers,
Tags: domainFromResp.Tags,
// SourceDnsServers: domainFromResp.SourceDnsServers,
}
}
// getDomainRecords returns the domain records with the given domain name, rr, and record type.
func (d *dnsClient) getDomainRecords(ctx context.Context, domainName, rr, recordType string) (map[string]alidns.Record, error) {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return nil, err
}
records := make(map[string]alidns.Record)
pageSize, pageNumber := 20, 1
req := alidns.CreateDescribeDomainRecordsRequest()
req.PageSize = requests.NewInteger(pageSize)
for {
req.PageNumber = requests.NewInteger(pageNumber)
req.DomainName = domainName
req.RRKeyWord = rr
req.TypeKeyWord = recordType
resp, err := d.Client.DescribeDomainRecords(req)
if err != nil {
return nil, err
}
for _, record := range resp.DomainRecords.Record {
records[record.Value] = record
}
if resp.PageNumber*int64(pageSize) >= resp.TotalCount {
break
}
pageNumber++
}
return records, nil
}
func (d *dnsClient) createDomainRecord(ctx context.Context, domainName, rr, recordType, value string, ttl int64) error {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return err
}
req := alidns.CreateAddDomainRecordRequest()
req.DomainName = domainName
req.RR = rr
req.Type = recordType
req.Value = value
req.TTL = requests.NewInteger(int(ttl))
_, err := d.Client.AddDomainRecord(req)
return err
}
func (d *dnsClient) updateDomainRecord(ctx context.Context, id, rr, recordType, value string, ttl int64) error {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return err
}
req := alidns.CreateUpdateDomainRecordRequest()
req.RecordId = id
req.RR = rr
req.Type = recordType
req.Value = value
req.TTL = requests.NewInteger(int(ttl))
_, err := d.Client.UpdateDomainRecord(req)
return err
}
func (d *dnsClient) deleteDomainRecord(ctx context.Context, id string) error | {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return err
}
req := alidns.CreateDeleteDomainRecordRequest()
req.RecordId = id
if _, err := d.Client.DeleteDomainRecord(req); err != nil && !isDomainRecordDoesNotExistError(err) {
return err
}
return nil
} | identifier_body |
|
client_dns.go | ID, accessKeySecret)
if err != nil {
return nil, err
}
return &dnsClient{
Client: *client,
accessKeyID: accessKeyID,
domainsCache: f.domainsCache,
domainsCacheMutex: &f.domainsCacheMutex,
RateLimiter: f.getRateLimiter(accessKeyID),
RateLimiterWaitTimeout: f.waitTimeout,
Logger: log.Log.WithName("ali-dnsclient"),
}, nil
}
func (f *clientFactory) getRateLimiter(accessKeyID string) *rate.Limiter {
// cache.Expiring Get and Set methods are concurrency-safe
	// However, if a rate limiter is not present in the cache, it may happen that multiple rate limiters are created
	// at the same time for the same access key id, and the desired QPS is exceeded, so use a mutex to guard against this
f.rateLimitersMutex.Lock()
defer f.rateLimitersMutex.Unlock()
	// Get the rate limiter from the cache, or create a new one if not present
var rateLimiter *rate.Limiter
if v, ok := f.rateLimiters.Get(accessKeyID); ok {
rateLimiter = v.(*rate.Limiter)
} else {
rateLimiter = rate.NewLimiter(f.limit, f.burst)
}
// Set should be called on every Get with cache.Expiring to refresh the TTL
f.rateLimiters.Set(accessKeyID, rateLimiter, rateLimiterCacheTTL)
return rateLimiter
}
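// Rough sketch of the intended limiter behaviour (assumed from golang.org/x/time/rate semantics, not
// stated in this file): a limiter built with rate.NewLimiter(limit, burst) lets up to `burst` calls for
// the same access key ID proceed immediately and then throttles further calls to `limit` per second;
// its Wait method, used by waitForAliDNSRateLimiter, blocks until a token is available or the context times out.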
// GetDomainNames returns a map of all domain names mapped to their composite domain names.
func (d *dnsClient) GetDomainNames(ctx context.Context) (map[string]string, error) {
domains, err := d.getDomainsWithCache(ctx)
if err != nil {
return nil, err
}
domainNames := make(map[string]string)
for _, domain := range domains {
domainNames[domain.DomainName] = CompositeDomainName(domain.DomainName, domain.DomainId)
}
return domainNames, nil
}
// GetDomainName returns the composite domain name of the domain with the given domain id.
func (d *dnsClient) GetDomainName(ctx context.Context, domainId string) (string, error) {
domains, err := d.getDomainsWithCache(ctx)
if err != nil {
return "", err
}
domain, ok := domains[domainId]
if !ok {
return "", fmt.Errorf("DNS domain with id %s not found", domainId)
}
return CompositeDomainName(domain.DomainName, domain.DomainId), nil
}
// CreateOrUpdateDomainRecords creates or updates the domain records with the given domain name, name, record type,
// values, and ttl.
// * For each element in values that has an existing domain record, the existing record is updated if needed.
// * For each element in values that doesn't have an existing domain record, a new domain record is created.
// * For each existing domain record that doesn't have a corresponding element in values, the existing record is deleted.
func (d *dnsClient) CreateOrUpdateDomainRecords(ctx context.Context, domainName, name, recordType string, values []string, ttl int64) error {
domainName, _ = DomainNameAndId(domainName)
rr, err := getRR(name, domainName)
if err != nil {
return err
}
records, err := d.getDomainRecords(ctx, domainName, rr, recordType)
if err != nil {
return err
}
for _, value := range values {
if record, ok := records[value]; ok {
// Only update the existing domain record if the current TTL value is different from the given one
// At this point we know that rr, recordType, and value are the same
if record.TTL != ttl {
if err := d.updateDomainRecord(ctx, record.RecordId, rr, recordType, value, ttl); err != nil {
return err
}
}
} else {
if err := d.createDomainRecord(ctx, domainName, rr, recordType, value, ttl); err != nil {
return err
}
}
}
for value, record := range records {
if !utils.ValueExists(value, values) {
if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
return err
}
}
}
return nil
}
// DeleteDomainRecords deletes the domain records with the given domain name, name and record type.
func (d *dnsClient) DeleteDomainRecords(ctx context.Context, domainName, name, recordType string) error {
domainName, _ = DomainNameAndId(domainName)
rr, err := getRR(name, domainName)
if err != nil {
return err
}
records, err := d.getDomainRecords(ctx, domainName, rr, recordType)
if err != nil {
return err
}
for _, record := range records {
if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
return err
}
}
return nil
}
func (d *dnsClient) getDomainsWithCache(ctx context.Context) (map[string]alidns.Domain, error) {
// cache.Expiring Get and Set methods are concurrency-safe.
// However, if an accessKeyID is not present in the cache and multiple DNSRecords are reconciled at the same time,
// it may happen that getDomains is called multiple times instead of just one, so use a mutex to guard against this.
	// It is ok to use a shared mutex here as long as the number of accessKeyIDs using custom domains is low.
// This may need to be revisited with a larger number of such accessKeyIDs to avoid them blocking each other
// during the (potentially long-running) call to getDomains.
d.domainsCacheMutex.Lock()
defer d.domainsCacheMutex.Unlock()
if v, ok := d.domainsCache.Get(d.accessKeyID); ok {
return v.(map[string]alidns.Domain), nil
}
domains, err := d.getDomains(ctx)
if err != nil {
return nil, err
}
d.domainsCache.Set(d.accessKeyID, domains, domainsCacheTTL)
return domains, nil
}
// getDomains returns all domains.
func (d *dnsClient) getDomains(ctx context.Context) (map[string]alidns.Domain, error) {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return nil, err
}
domains := make(map[string]alidns.Domain)
pageSize, pageNumber := 20, 1
req := alidns.CreateDescribeDomainsRequest()
req.PageSize = requests.NewInteger(pageSize)
for {
req.PageNumber = requests.NewInteger(pageNumber)
resp, err := d.Client.DescribeDomains(req)
if err != nil {
return nil, err
}
for _, domain := range resp.Domains.Domain {
domains[domain.DomainId] = getDomainFromResponse(domain)
}
if resp.PageNumber*int64(pageSize) >= resp.TotalCount {
break
}
pageNumber++
}
return domains, nil
}
func getDomainFromResponse(domainFromResp alidns.DomainInDescribeDomains) alidns.Domain {
return alidns.Domain{
DomainId: domainFromResp.DomainId,
DomainName: domainFromResp.DomainName,
AliDomain: domainFromResp.AliDomain,
CreateTimestamp: domainFromResp.CreateTimestamp,
//ExpireTimestamp: ,
InstanceEndTime: domainFromResp.InstanceEndTime,
CreateTime: domainFromResp.CreateTime,
// SourceProtocol: domainFromResp.SourceProtocol,
GroupName: domainFromResp.GroupName,
VersionCode: domainFromResp.VersionCode,
// UpdateTimestamp: domainFromResp.UpdateTimestamp,
RecordCount: domainFromResp.RecordCount,
InstanceExpired: domainFromResp.InstanceExpired,
ResourceGroupId: domainFromResp.ResourceGroupId,
// CacheTtlMin: domainFromResp.CacheTtlMin,
InstanceId: domainFromResp.InstanceId,
//ExpireTime:,
GroupId: domainFromResp.GroupId,
// SourceEdns: domainFromResp.SourceEdns,
RegistrantEmail: domainFromResp.RegistrantEmail,
VersionName: domainFromResp.VersionName,
// UpdateTime: domainFromResp.UpdateTime,
Remark: domainFromResp.Remark,
// CacheTtlMax: domainFromResp.CacheTtlMax,
PunyCode: domainFromResp.PunyCode,
Starmark: domainFromResp.Starmark,
// DnsServers: domainFromResp.DnsServers,
Tags: domainFromResp.Tags,
// SourceDnsServers: domainFromResp.SourceDnsServers,
}
}
// getDomainRecords returns the domain records with the given domain name, rr, and record type.
func (d *dnsClient) getDomainRecords(ctx context.Context, domainName, rr, recordType string) (map[string]alidns.Record, error) { | if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return nil, err
} | random_line_split |
|
client_dns.go | nil {
return err
}
for _, value := range values {
if record, ok := records[value]; ok {
// Only update the existing domain record if the current TTL value is different from the given one
// At this point we know that rr, recordType, and value are the same
if record.TTL != ttl {
if err := d.updateDomainRecord(ctx, record.RecordId, rr, recordType, value, ttl); err != nil {
return err
}
}
} else {
if err := d.createDomainRecord(ctx, domainName, rr, recordType, value, ttl); err != nil {
return err
}
}
}
for value, record := range records {
if !utils.ValueExists(value, values) {
if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
return err
}
}
}
return nil
}
// DeleteDomainRecords deletes the domain records with the given domain name, name and record type.
func (d *dnsClient) DeleteDomainRecords(ctx context.Context, domainName, name, recordType string) error {
domainName, _ = DomainNameAndId(domainName)
rr, err := getRR(name, domainName)
if err != nil {
return err
}
records, err := d.getDomainRecords(ctx, domainName, rr, recordType)
if err != nil {
return err
}
for _, record := range records {
if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
return err
}
}
return nil
}
func (d *dnsClient) getDomainsWithCache(ctx context.Context) (map[string]alidns.Domain, error) {
// cache.Expiring Get and Set methods are concurrency-safe.
// However, if an accessKeyID is not present in the cache and multiple DNSRecords are reconciled at the same time,
// it may happen that getDomains is called multiple times instead of just one, so use a mutex to guard against this.
	// It is ok to use a shared mutex here as long as the number of accessKeyIDs using custom domains is low.
// This may need to be revisited with a larger number of such accessKeyIDs to avoid them blocking each other
// during the (potentially long-running) call to getDomains.
d.domainsCacheMutex.Lock()
defer d.domainsCacheMutex.Unlock()
if v, ok := d.domainsCache.Get(d.accessKeyID); ok {
return v.(map[string]alidns.Domain), nil
}
domains, err := d.getDomains(ctx)
if err != nil {
return nil, err
}
d.domainsCache.Set(d.accessKeyID, domains, domainsCacheTTL)
return domains, nil
}
// getDomains returns all domains.
func (d *dnsClient) getDomains(ctx context.Context) (map[string]alidns.Domain, error) {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return nil, err
}
domains := make(map[string]alidns.Domain)
pageSize, pageNumber := 20, 1
req := alidns.CreateDescribeDomainsRequest()
req.PageSize = requests.NewInteger(pageSize)
for {
req.PageNumber = requests.NewInteger(pageNumber)
resp, err := d.Client.DescribeDomains(req)
if err != nil {
return nil, err
}
for _, domain := range resp.Domains.Domain {
domains[domain.DomainId] = getDomainFromResponse(domain)
}
if resp.PageNumber*int64(pageSize) >= resp.TotalCount {
break
}
pageNumber++
}
return domains, nil
}
func getDomainFromResponse(domainFromResp alidns.DomainInDescribeDomains) alidns.Domain {
return alidns.Domain{
DomainId: domainFromResp.DomainId,
DomainName: domainFromResp.DomainName,
AliDomain: domainFromResp.AliDomain,
CreateTimestamp: domainFromResp.CreateTimestamp,
//ExpireTimestamp: ,
InstanceEndTime: domainFromResp.InstanceEndTime,
CreateTime: domainFromResp.CreateTime,
// SourceProtocol: domainFromResp.SourceProtocol,
GroupName: domainFromResp.GroupName,
VersionCode: domainFromResp.VersionCode,
// UpdateTimestamp: domainFromResp.UpdateTimestamp,
RecordCount: domainFromResp.RecordCount,
InstanceExpired: domainFromResp.InstanceExpired,
ResourceGroupId: domainFromResp.ResourceGroupId,
// CacheTtlMin: domainFromResp.CacheTtlMin,
InstanceId: domainFromResp.InstanceId,
//ExpireTime:,
GroupId: domainFromResp.GroupId,
// SourceEdns: domainFromResp.SourceEdns,
RegistrantEmail: domainFromResp.RegistrantEmail,
VersionName: domainFromResp.VersionName,
// UpdateTime: domainFromResp.UpdateTime,
Remark: domainFromResp.Remark,
// CacheTtlMax: domainFromResp.CacheTtlMax,
PunyCode: domainFromResp.PunyCode,
Starmark: domainFromResp.Starmark,
// DnsServers: domainFromResp.DnsServers,
Tags: domainFromResp.Tags,
// SourceDnsServers: domainFromResp.SourceDnsServers,
}
}
// getDomainRecords returns the domain records with the given domain name, rr, and record type.
func (d *dnsClient) getDomainRecords(ctx context.Context, domainName, rr, recordType string) (map[string]alidns.Record, error) {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return nil, err
}
records := make(map[string]alidns.Record)
pageSize, pageNumber := 20, 1
req := alidns.CreateDescribeDomainRecordsRequest()
req.PageSize = requests.NewInteger(pageSize)
for {
req.PageNumber = requests.NewInteger(pageNumber)
req.DomainName = domainName
req.RRKeyWord = rr
req.TypeKeyWord = recordType
resp, err := d.Client.DescribeDomainRecords(req)
if err != nil {
return nil, err
}
for _, record := range resp.DomainRecords.Record {
records[record.Value] = record
}
if resp.PageNumber*int64(pageSize) >= resp.TotalCount {
break
}
pageNumber++
}
return records, nil
}
func (d *dnsClient) createDomainRecord(ctx context.Context, domainName, rr, recordType, value string, ttl int64) error {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return err
}
req := alidns.CreateAddDomainRecordRequest()
req.DomainName = domainName
req.RR = rr
req.Type = recordType
req.Value = value
req.TTL = requests.NewInteger(int(ttl))
_, err := d.Client.AddDomainRecord(req)
return err
}
func (d *dnsClient) updateDomainRecord(ctx context.Context, id, rr, recordType, value string, ttl int64) error {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return err
}
req := alidns.CreateUpdateDomainRecordRequest()
req.RecordId = id
req.RR = rr
req.Type = recordType
req.Value = value
req.TTL = requests.NewInteger(int(ttl))
_, err := d.Client.UpdateDomainRecord(req)
return err
}
func (d *dnsClient) deleteDomainRecord(ctx context.Context, id string) error {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return err
}
req := alidns.CreateDeleteDomainRecordRequest()
req.RecordId = id
if _, err := d.Client.DeleteDomainRecord(req); err != nil && !isDomainRecordDoesNotExistError(err) {
return err
}
return nil
}
func (d *dnsClient) waitForAliDNSRateLimiter(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, d.RateLimiterWaitTimeout)
defer cancel()
t := time.Now()
if err := d.RateLimiter.Wait(timeoutCtx); err != nil {
return &RateLimiterWaitError{Cause: err}
}
if waitDuration := time.Since(t); waitDuration.Seconds() > 1/float64(d.RateLimiter.Limit()) {
d.Logger.Info("Waited for client-side aliyun DNS rate limiter", "waitDuration", waitDuration.String())
}
return nil
}
func getRR(name, domainName string) (string, error) {
if name == domainName {
return "@", nil
}
suffix := "." + domainName
if !strings.HasSuffix(name, suffix) {
return "", fmt.Errorf("name %s does not match domain name %s", name, domainName)
}
return strings.TrimSuffix(name, suffix), nil
}
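// Examples (added for illustration):
//   getRR("example.com", "example.com")     -> "@"
//   getRR("www.example.com", "example.com") -> "www"
//   getRR("other.org", "example.com")       -> error: name does not match the domain name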
func isDomainRecordDoesNotExistError(err error) bool {
if serverError, ok := err.(*errors.ServerError); ok {
if serverError.ErrorCode() == alicloud.ErrorCodeDomainRecordNotBelongToUser | {
return true
} | conditional_block |
|
client_dns.go | (region, accessKeyID, accessKeySecret string) (DNS, error) {
client, err := alidns.NewClientWithAccessKey(region, accessKeyID, accessKeySecret)
if err != nil {
return nil, err
}
return &dnsClient{
Client: *client,
accessKeyID: accessKeyID,
domainsCache: f.domainsCache,
domainsCacheMutex: &f.domainsCacheMutex,
RateLimiter: f.getRateLimiter(accessKeyID),
RateLimiterWaitTimeout: f.waitTimeout,
Logger: log.Log.WithName("ali-dnsclient"),
}, nil
}
func (f *clientFactory) getRateLimiter(accessKeyID string) *rate.Limiter {
// cache.Expiring Get and Set methods are concurrency-safe
	// However, if a rate limiter is not present in the cache, it may happen that multiple rate limiters are created
	// at the same time for the same access key id, and the desired QPS is exceeded, so use a mutex to guard against this
f.rateLimitersMutex.Lock()
defer f.rateLimitersMutex.Unlock()
	// Get the rate limiter from the cache, or create a new one if not present
var rateLimiter *rate.Limiter
if v, ok := f.rateLimiters.Get(accessKeyID); ok {
rateLimiter = v.(*rate.Limiter)
} else {
rateLimiter = rate.NewLimiter(f.limit, f.burst)
}
// Set should be called on every Get with cache.Expiring to refresh the TTL
f.rateLimiters.Set(accessKeyID, rateLimiter, rateLimiterCacheTTL)
return rateLimiter
}
// GetDomainNames returns a map of all domain names mapped to their composite domain names.
func (d *dnsClient) GetDomainNames(ctx context.Context) (map[string]string, error) {
domains, err := d.getDomainsWithCache(ctx)
if err != nil {
return nil, err
}
domainNames := make(map[string]string)
for _, domain := range domains {
domainNames[domain.DomainName] = CompositeDomainName(domain.DomainName, domain.DomainId)
}
return domainNames, nil
}
// GetDomainName returns the composite domain name of the domain with the given domain id.
func (d *dnsClient) GetDomainName(ctx context.Context, domainId string) (string, error) {
domains, err := d.getDomainsWithCache(ctx)
if err != nil {
return "", err
}
domain, ok := domains[domainId]
if !ok {
return "", fmt.Errorf("DNS domain with id %s not found", domainId)
}
return CompositeDomainName(domain.DomainName, domain.DomainId), nil
}
// CreateOrUpdateDomainRecords creates or updates the domain records with the given domain name, name, record type,
// values, and ttl.
// * For each element in values that has an existing domain record, the existing record is updated if needed.
// * For each element in values that doesn't have an existing domain record, a new domain record is created.
// * For each existing domain record that doesn't have a corresponding element in values, the existing record is deleted.
func (d *dnsClient) CreateOrUpdateDomainRecords(ctx context.Context, domainName, name, recordType string, values []string, ttl int64) error {
domainName, _ = DomainNameAndId(domainName)
rr, err := getRR(name, domainName)
if err != nil {
return err
}
records, err := d.getDomainRecords(ctx, domainName, rr, recordType)
if err != nil {
return err
}
for _, value := range values {
if record, ok := records[value]; ok {
// Only update the existing domain record if the current TTL value is different from the given one
// At this point we know that rr, recordType, and value are the same
if record.TTL != ttl {
if err := d.updateDomainRecord(ctx, record.RecordId, rr, recordType, value, ttl); err != nil {
return err
}
}
} else {
if err := d.createDomainRecord(ctx, domainName, rr, recordType, value, ttl); err != nil {
return err
}
}
}
for value, record := range records {
if !utils.ValueExists(value, values) {
if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
return err
}
}
}
return nil
}
// DeleteDomainRecords deletes the domain records with the given domain name, name and record type.
func (d *dnsClient) DeleteDomainRecords(ctx context.Context, domainName, name, recordType string) error {
domainName, _ = DomainNameAndId(domainName)
rr, err := getRR(name, domainName)
if err != nil {
return err
}
records, err := d.getDomainRecords(ctx, domainName, rr, recordType)
if err != nil {
return err
}
for _, record := range records {
if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
return err
}
}
return nil
}
func (d *dnsClient) getDomainsWithCache(ctx context.Context) (map[string]alidns.Domain, error) {
// cache.Expiring Get and Set methods are concurrency-safe.
// However, if an accessKeyID is not present in the cache and multiple DNSRecords are reconciled at the same time,
// it may happen that getDomains is called multiple times instead of just one, so use a mutex to guard against this.
	// It is ok to use a shared mutex here as long as the number of accessKeyIDs using custom domains is low.
// This may need to be revisited with a larger number of such accessKeyIDs to avoid them blocking each other
// during the (potentially long-running) call to getDomains.
d.domainsCacheMutex.Lock()
defer d.domainsCacheMutex.Unlock()
if v, ok := d.domainsCache.Get(d.accessKeyID); ok {
return v.(map[string]alidns.Domain), nil
}
domains, err := d.getDomains(ctx)
if err != nil {
return nil, err
}
d.domainsCache.Set(d.accessKeyID, domains, domainsCacheTTL)
return domains, nil
}
// getDomains returns all domains.
func (d *dnsClient) getDomains(ctx context.Context) (map[string]alidns.Domain, error) {
if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
return nil, err
}
domains := make(map[string]alidns.Domain)
pageSize, pageNumber := 20, 1
req := alidns.CreateDescribeDomainsRequest()
req.PageSize = requests.NewInteger(pageSize)
for {
req.PageNumber = requests.NewInteger(pageNumber)
resp, err := d.Client.DescribeDomains(req)
if err != nil {
return nil, err
}
for _, domain := range resp.Domains.Domain {
domains[domain.DomainId] = getDomainFromResponse(domain)
}
if resp.PageNumber*int64(pageSize) >= resp.TotalCount {
break
}
pageNumber++
}
return domains, nil
}
func getDomainFromResponse(domainFromResp alidns.DomainInDescribeDomains) alidns.Domain {
return alidns.Domain{
DomainId: domainFromResp.DomainId,
DomainName: domainFromResp.DomainName,
AliDomain: domainFromResp.AliDomain,
CreateTimestamp: domainFromResp.CreateTimestamp,
//ExpireTimestamp: ,
InstanceEndTime: domainFromResp.InstanceEndTime,
CreateTime: domainFromResp.CreateTime,
// SourceProtocol: domainFromResp.SourceProtocol,
GroupName: domainFromResp.GroupName,
VersionCode: domainFromResp.VersionCode,
// UpdateTimestamp: domainFromResp.UpdateTimestamp,
RecordCount: domainFromResp.RecordCount,
InstanceExpired: domainFromResp.InstanceExpired,
ResourceGroupId: domainFromResp.ResourceGroupId,
// CacheTtlMin: domainFromResp.CacheTtlMin,
InstanceId: domainFromResp.InstanceId,
//ExpireTime:,
GroupId: domainFromResp.GroupId,
// SourceEdns: domainFromResp.SourceEdns,
RegistrantEmail: domainFromResp.RegistrantEmail,
VersionName: domainFromResp.VersionName,
// UpdateTime: domainFromResp.UpdateTime,
Remark: domainFromResp.Remark,
// CacheTtlMax: domainFromResp.CacheTtlMax,
PunyCode: domainFromResp.PunyCode,
Starmark: domainFromResp.Starmark,
// DnsServers: domainFromResp.DnsServers,
Tags: domainFromResp.Tags,
// SourceDnsServers: domainFromResp.SourceDnsServers,
}
}
// getDomainRecords returns the domain records with the given domain name, rr, and record type.
func (d *dnsClient) | (ctx context.Context, domainName, rr, recordType string) (map | getDomainRecords | identifier_name |
lib.rs | (outputs: &syn::ReturnType) -> TokenStream {
let ty = match outputs {
syn::ReturnType::Default => quote!(()),
syn::ReturnType::Type(_, ty) => quote!(#ty),
};
quote!(pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt())
}
fn impl_info_for_fdw(item: &syn::Item) -> TokenStream {
let typ = if let syn::Item::Struct(typ) = item {
typ
} else {
panic!("Annotation only supported on structs")
};
let mut decl = item.clone().into_token_stream();
let struct_name = &typ.ident;
let func_name = syn::Ident::new(
&format!("fdw_{}", struct_name),
Span::call_site(),
);
let info_fn = get_info_fn(&func_name);
let fdw_fn = quote!(
#[no_mangle]
pub extern "C" fn #func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum()
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", struct_name), Span::call_site());
let sql_stmt = format!(
"
CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT;
CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR;
",
struct_name, func_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
library_path = library_path
)
}
);
decl.extend(info_fn);
decl.extend(create_sql_def);
decl.extend(fdw_fn);
decl
}
fn get_info_fn(func_name: &syn::Ident) -> TokenStream {
let func_info_name = syn::Ident::new(
&format!("pg_finfo_{}", func_name),
Span::call_site(),
);
// create the postgres info
quote!(
#[no_mangle]
pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record {
const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 };
&my_finfo
}
)
}
fn impl_info_for_fn(item: &syn::Item) -> TokenStream {
let func = if let syn::Item::Fn(func) = item {
func
} else {
panic!("annotation only supported on functions");
};
let func_name = &func.ident;
let func_decl = &func.decl;
if func_decl.variadic.is_some() {
panic!("variadic functions (...) not supported")
}
//let generics = &func_decl.generics;
let inputs = &func_decl.inputs;
let output = &func_decl.output;
//let func_block = &func.block;
// declare the function
let mut function = item.clone().into_token_stream();
let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site());
let func_info = get_info_fn(&func_wrapper_name);
// join the function information in
function.extend(func_info);
let get_args_from_datums = extract_arg_data(inputs);
let func_params = create_function_params(inputs.len());
// wrap the original function in a pg_wrapper function
let func_wrapper = quote!(
#[no_mangle]
pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
use std::panic;
let func_info: &mut pg_extend::pg_sys::FunctionCallInfoData = unsafe {
func_call_info
.as_mut()
.expect("func_call_info was unexpectedly NULL")
};
// guard the Postgres process against the panic, and give us an opportunity to clean up
let panic_result = panic::catch_unwind(|| {
// extract the argument list
let (mut args, mut args_null) = pg_extend::get_args(func_info);
// arbitrary Datum conversions occur here, and could panic
// so this is inside the catch unwind
#get_args_from_datums
// this is the meat of the function call into the extension code
let result = #func_name(#func_params);
// arbitrary Rust code could panic, so this is guarded
pg_extend::pg_datum::PgDatum::from(result)
});
// see if we caught a panic
match panic_result {
Ok(result) => {
// in addition to the null case, we should handle result types probably
let isnull: pg_extend::pg_bool::Bool = result.is_null().into();
func_info.isnull = isnull.into();
// return the datum
result.into_datum()
}
Err(err) => {
// ensure the return value is null
func_info.isnull = pg_extend::pg_bool::Bool::from(true).into();
// TODO: anything else to clean up before resuming the panic?
panic::resume_unwind(err)
}
}
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site());
let sql_params = sql_param_list(inputs.len());
let sql_param_types = sql_param_types(inputs);
let sql_return = sql_return_type(output);
// ret and library_path are replacements at runtime
let sql_stmt = format!(
// FIXME: Add/remove STRICT keywords based on Option<> arguments.
"CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C STRICT;",
func_name, sql_params, func_wrapper_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
#sql_param_types
ret = #sql_return,
library_path = library_path
)
}
);
function.extend(func_wrapper);
function.extend(create_sql_def);
function
}
/// An attribute macro for wrapping Rust functions with boiler plate for defining and
/// calling conventions between Postgres and Rust.
///
/// This mimics the C macro for defining functions
///
/// ```c
/// #define PG_FUNCTION_INFO_V1(funcname) \
/// extern Datum funcname(PG_FUNCTION_ARGS); \
/// extern PGDLLEXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \
/// const Pg_finfo_record * \
/// CppConcat(pg_finfo_,funcname) (void) \
/// { \
/// static const Pg_finfo_record my_finfo = { 1 }; \
/// return &my_finfo; \
/// } \
/// ```
///
/// # Returns
///
/// The result of this macro will be to produce a new function wrapping the one annotated but prepended with
/// `pg_` to distinguish them and also declares a function for Postgres to get the Function information;
///
/// For example: if the signature `fn add_one(value: i32) -> i32` is annotated, two functions will be produced,
/// the wrapper function with a signature of:
///
/// ```rust,no_run
/// extern crate pg_extend;
/// use pg_extend::pg_sys;
///
/// #[no_mangle]
/// pub extern "C" fn pg_add_one(func_call_info: pg_sys::FunctionCallInfo) -> pg_sys::Datum
/// # {
/// # unimplemented!()
/// # }
/// ```
///
/// and the info function with a signature of:
///
/// ```rust,no_run
/// extern crate pg_extend;
/// use pg_extend::pg_sys;
///
/// #[no_mangle]
/// pub extern "C" fn pg_finfo_pg_add_one() -> &'static pg_sys::Pg_finfo_record
/// # {
/// # unimplemented!()
/// # }
/// ```
///
#[proc_macro_attribute]
#[allow(clippy::needless_pass_by_value)]
pub fn pg_extern(
_attr: proc_macro::TokenStream,
item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
// get a usable token stream
let ast: syn::Item = parse_macro_input!(item as syn::Item);
// Build the impl
let expanded: TokenStream = impl_info_for_fn(&ast);
// Return the generated impl
proc_macro::TokenStream::from(expanded)
}
/// An attribute macro for wrapping Rust structs with boiler plate for defining and exposing a foreign data wrapper
/// This is mostly a slimmed down version of pg_extern, with none of the data argument handling.
#[proc_macro_attribute]
#[allow(clippy::needless_pass_by_value)]
pub fn | pg_foreignwrapper | identifier_name |
|
lib.rs | let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site());
let arg_error = format!("unsupported function argument type for {}", arg_name);
let get_arg = quote!(
let #arg_name: #arg_type = pg_extend::pg_datum::TryFromPgDatum::try_from(
pg_extend::pg_datum::PgDatum::from_raw(
*args.next().expect("wrong number of args passed into get_args for args?"),
args_null.next().expect("wrong number of args passed into get_args for args_null?")
),
)
.expect(#arg_error);
);
get_args_stream.extend(get_arg);
}
get_args_stream
}
fn sql_param_list(num_args: usize) -> String {
let mut tokens = String::new();
if num_args == 0 {
return tokens;
}
let arg_name = |num: usize| format!("{{sql_{}}}", num);
for i in 0..(num_args - 1) {
let arg_name = arg_name(i);
tokens.push_str(&format!("{},", arg_name));
}
let arg_name = arg_name(num_args - 1);
tokens.push_str(&arg_name);
tokens
}
fn sql_param_types(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream {
let mut tokens = TokenStream::new();
for (i, arg) in inputs.iter().enumerate() {
let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let sql_name = Ident::new(&format!("sql_{}", i), Span::call_site());
let sql_param = quote!(
#sql_name = pg_extend::pg_type::PgType::from_rust::<#arg_type>().as_str(),
);
tokens.extend(sql_param);
}
tokens
}
fn sql_return_type(outputs: &syn::ReturnType) -> TokenStream |
fn impl_info_for_fdw(item: &syn::Item) -> TokenStream {
let typ = if let syn::Item::Struct(typ) = item {
typ
} else {
panic!("Annotation only supported on structs")
};
let mut decl = item.clone().into_token_stream();
let struct_name = &typ.ident;
let func_name = syn::Ident::new(
&format!("fdw_{}", struct_name),
Span::call_site(),
);
let info_fn = get_info_fn(&func_name);
let fdw_fn = quote!(
#[no_mangle]
pub extern "C" fn #func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum()
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", struct_name), Span::call_site());
let sql_stmt = format!(
"
CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT;
CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR;
",
struct_name, func_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
library_path = library_path
)
}
);
decl.extend(info_fn);
decl.extend(create_sql_def);
decl.extend(fdw_fn);
decl
}
fn get_info_fn(func_name: &syn::Ident) -> TokenStream {
let func_info_name = syn::Ident::new(
&format!("pg_finfo_{}", func_name),
Span::call_site(),
);
// create the postgres info
quote!(
#[no_mangle]
pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record {
const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 };
&my_finfo
}
)
}
fn impl_info_for_fn(item: &syn::Item) -> TokenStream {
let func = if let syn::Item::Fn(func) = item {
func
} else {
panic!("annotation only supported on functions");
};
let func_name = &func.ident;
let func_decl = &func.decl;
if func_decl.variadic.is_some() {
panic!("variadic functions (...) not supported")
}
//let generics = &func_decl.generics;
let inputs = &func_decl.inputs;
let output = &func_decl.output;
//let func_block = &func.block;
// declare the function
let mut function = item.clone().into_token_stream();
let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site());
let func_info = get_info_fn(&func_wrapper_name);
// join the function information in
function.extend(func_info);
let get_args_from_datums = extract_arg_data(inputs);
let func_params = create_function_params(inputs.len());
// wrap the original function in a pg_wrapper function
let func_wrapper = quote!(
#[no_mangle]
pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
use std::panic;
let func_info: &mut pg_extend::pg_sys::FunctionCallInfoData = unsafe {
func_call_info
.as_mut()
.expect("func_call_info was unexpectedly NULL")
};
// guard the Postgres process against the panic, and give us an opportunity to clean up
let panic_result = panic::catch_unwind(|| {
// extract the argument list
let (mut args, mut args_null) = pg_extend::get_args(func_info);
// arbitrary Datum conversions occur here, and could panic
// so this is inside the catch unwind
#get_args_from_datums
// this is the meat of the function call into the extension code
let result = #func_name(#func_params);
// arbitrary Rust code could panic, so this is guarded
pg_extend::pg_datum::PgDatum::from(result)
});
// see if we caught a panic
match panic_result {
Ok(result) => {
// in addition to the null case, we should handle result types probably
let isnull: pg_extend::pg_bool::Bool = result.is_null().into();
func_info.isnull = isnull.into();
// return the datum
result.into_datum()
}
Err(err) => {
// ensure the return value is null
func_info.isnull = pg_extend::pg_bool::Bool::from(true).into();
// TODO: anything else to clean up before resuming the panic?
panic::resume_unwind(err)
}
}
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site());
let sql_params = sql_param_list(inputs.len());
let sql_param_types = sql_param_types(inputs);
let sql_return = sql_return_type(output);
// ret and library_path are replacements at runtime
let sql_stmt = format!(
// FIXME: Add/remove STRICT keywords based on Option<> arguments.
"CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C STRICT;",
func_name, sql_params, func_wrapper_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
#sql_param_types
ret = #sql_return,
library_path = library_path
)
}
);
function.extend(func_wrapper);
| {
let ty = match outputs {
syn::ReturnType::Default => quote!(()),
syn::ReturnType::Type(_, ty) => quote!(#ty),
};
quote!(pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt())
} | identifier_body |
lib.rs | let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site());
let arg_error = format!("unsupported function argument type for {}", arg_name);
let get_arg = quote!(
let #arg_name: #arg_type = pg_extend::pg_datum::TryFromPgDatum::try_from(
pg_extend::pg_datum::PgDatum::from_raw(
*args.next().expect("wrong number of args passed into get_args for args?"),
args_null.next().expect("wrong number of args passed into get_args for args_null?")
),
)
.expect(#arg_error);
);
get_args_stream.extend(get_arg);
}
get_args_stream
}
fn sql_param_list(num_args: usize) -> String {
let mut tokens = String::new();
if num_args == 0 {
return tokens;
}
let arg_name = |num: usize| format!("{{sql_{}}}", num);
for i in 0..(num_args - 1) {
let arg_name = arg_name(i);
tokens.push_str(&format!("{},", arg_name));
}
let arg_name = arg_name(num_args - 1);
tokens.push_str(&arg_name);
tokens
}
fn sql_param_types(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream {
let mut tokens = TokenStream::new();
for (i, arg) in inputs.iter().enumerate() {
let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let sql_name = Ident::new(&format!("sql_{}", i), Span::call_site());
let sql_param = quote!(
#sql_name = pg_extend::pg_type::PgType::from_rust::<#arg_type>().as_str(),
);
tokens.extend(sql_param);
}
tokens
}
fn sql_return_type(outputs: &syn::ReturnType) -> TokenStream {
let ty = match outputs {
syn::ReturnType::Default => quote!(()),
syn::ReturnType::Type(_, ty) => quote!(#ty),
};
quote!(pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt())
}
fn impl_info_for_fdw(item: &syn::Item) -> TokenStream {
let typ = if let syn::Item::Struct(typ) = item {
typ
} else {
panic!("Annotation only supported on structs")
};
let mut decl = item.clone().into_token_stream();
let struct_name = &typ.ident;
let func_name = syn::Ident::new(
&format!("fdw_{}", struct_name),
Span::call_site(),
);
let info_fn = get_info_fn(&func_name);
let fdw_fn = quote!(
#[no_mangle]
pub extern "C" fn #func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum()
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", struct_name), Span::call_site());
let sql_stmt = format!(
"
CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT;
CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR;
",
struct_name, func_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
library_path = library_path
)
}
);
decl.extend(info_fn);
decl.extend(create_sql_def);
decl.extend(fdw_fn);
decl
}
fn get_info_fn(func_name: &syn::Ident) -> TokenStream {
let func_info_name = syn::Ident::new(
&format!("pg_finfo_{}", func_name),
Span::call_site(),
);
// create the postgres info
quote!(
#[no_mangle]
pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record {
const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 };
&my_finfo
}
)
}
fn impl_info_for_fn(item: &syn::Item) -> TokenStream {
let func = if let syn::Item::Fn(func) = item {
func
} else {
panic!("annotation only supported on functions");
};
let func_name = &func.ident;
let func_decl = &func.decl;
if func_decl.variadic.is_some() {
panic!("variadic functions (...) not supported")
}
//let generics = &func_decl.generics;
let inputs = &func_decl.inputs;
let output = &func_decl.output;
//let func_block = &func.block;
// declare the function
let mut function = item.clone().into_token_stream();
let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site());
let func_info = get_info_fn(&func_wrapper_name);
// join the function information in
function.extend(func_info);
let get_args_from_datums = extract_arg_data(inputs);
let func_params = create_function_params(inputs.len());
// wrap the original function in a pg_wrapper function
let func_wrapper = quote!(
#[no_mangle]
pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
use std::panic;
let func_info: &mut pg_extend::pg_sys::FunctionCallInfoData = unsafe {
func_call_info
.as_mut()
.expect("func_call_info was unexpectedly NULL")
};
|
// arbitrary Datum conversions occur here, and could panic
// so this is inside the catch unwind
#get_args_from_datums
// this is the meat of the function call into the extension code
let result = #func_name(#func_params);
// arbitrary Rust code could panic, so this is guarded
pg_extend::pg_datum::PgDatum::from(result)
});
// see if we caught a panic
match panic_result {
Ok(result) => {
// in addition to the null case, we should handle result types probably
let isnull: pg_extend::pg_bool::Bool = result.is_null().into();
func_info.isnull = isnull.into();
// return the datum
result.into_datum()
}
Err(err) => {
// ensure the return value is null
func_info.isnull = pg_extend::pg_bool::Bool::from(true).into();
// TODO: anything else to clean up before resuming the panic?
panic::resume_unwind(err)
}
}
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site());
let sql_params = sql_param_list(inputs.len());
let sql_param_types = sql_param_types(inputs);
let sql_return = sql_return_type(output);
// ret and library_path are replacements at runtime
let sql_stmt = format!(
// FIXME: Add/remove STRICT keywords based on Option<> arguments.
"CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C STRICT;",
func_name, sql_params, func_wrapper_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
#sql_param_types
ret = #sql_return,
library_path = library_path
)
}
);
function.extend(func_wrapper);
| // guard the Postgres process against the panic, and give us an opportunity to clean up
let panic_result = panic::catch_unwind(|| {
// extract the argument list
let (mut args, mut args_null) = pg_extend::get_args(func_info); | random_line_split |
biogrid-class.ts | ALL_BATTERY.DEFAULT_START_ENERGY;
return positions.map(
(position, index) =>
new BioBattery({
x: position.x,
y: position.y,
gridItemName: `${gridItemName}-${index}`,
gridItemResistance: batteryResistance,
energyInKiloWattHour: initEnergy,
maxCapacity,
} as BatteryParams)
);
}
/**
* This method creates a list of solar panels placed at the given positions
* @param positions holds the positions where the solar panels are going to be placed
*/
// TODO pass a list of equal length to hold the area for the solar panels
private createSolarPanels(positions: ItemPosition[]): EnergySource[] {
return positions.map(
(position, index) =>
new SolarPanel({
x: position.x,
y: position.y,
efficiency: 0.75,
areaSquareMeters: bioconstants.SOLAR_PANEL.AREA,
gridItemName: `${bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL}-${index}`,
date: this.startDate,
} as SolarPanelParams)
);
}
/**
* Drain the energy users according to the time of day
*/
updateEnergyUsage(date: Date) {
this.town.getEnergyUsers().forEach((energyUser) => {
energyUser.decreaseEnergyAccordingToTimeOfDay(date);
});
}
/**
* This method takes the results of the brain and changes the state graph as suggested by the brain.
* The results of the brain are in the form of an object of key:value pairs, with the receiver gridItemName as key and the supplier gridItemName as value
* @param action holds the results from the brain
* @returns the current state with a new graph which includes the changes that were suggested by the brain
*/
takeAction(action: GridAction) {
const powerEdges: { v: string; w: string; power: Power }[] = [];
// Set new efficiency
this.efficiency = action.getEfficiency();
// RETURN a new BiogridState
const allSupplyingPaths = action.getSupplyingPaths();
this.state.resetPowerOnEdges();
const clonedGraph = this.state.cloneStateGraph();
for (const supplyPath in allSupplyingPaths) {
const oldGridItem = this.state.getGridItem(supplyPath);
// take energy from the supplying grid item and transfer it to the energy user
const supplyingGridItem = this.state.getGridItem(
allSupplyingPaths[supplyPath]
);
const typeOldGridItem = this.getGridItemType(oldGridItem);
const energyUser = oldGridItem as Building | BioBattery;
const energyUserReq =
energyUser.getMaxCapacity() - energyUser.getEnergyInKilowattHour();
const typeSupplyingGridItem = this.getGridItemType(supplyingGridItem);
if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.ENERGY_USER) {
if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ||
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
) {
const battery = supplyingGridItem as BioBattery;
battery.supplyPower(energyUserReq);
clonedGraph.setNode(battery.gridItemName, battery);
} else if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
} else {
continue;
}
(energyUser as Building).increaseEnergy(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
} else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) {
if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
const battery = supplyingGridItem as BioBattery;
battery.supplyPower(energyUserReq);
clonedGraph.setNode(battery.gridItemName, battery);
} else if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
} else {
continue;
}
(energyUser as BioBattery).startCharging(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
} else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
} else {
continue;
}
(energyUser as BioBattery).startCharging(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
}
powerEdges.push({
v: supplyingGridItem.gridItemName,
w: energyUser.gridItemName,
// Convert kilowatt-hours into kilowatts
power: energyUserReq / bioconstants.TIME.DISCRETE_UNIT_HOURS,
});
}
this.state.setnewStateGraph(clonedGraph);
powerEdges.forEach((powerEdge) => {
this.state.setPowerBetweenNodes(
powerEdge.v,
powerEdge.w,
powerEdge.power
);
});
return this.state;
}
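// Editor's sketch (not part of the original source): getSupplyingPaths() is consumed above as
// a receiver -> supplier map keyed by gridItemName, e.g. with hypothetical names
// { "ENERGY_USER-3": "SOLAR_PANEL-1", "SMALL_BATTERY-0": "LARGE_BATTERY-0" }; for each entry
// just enough energy is moved to top the receiver up to its max capacity, and the matching
// power edge (energy / DISCRETE_UNIT_HOURS) is recorded on the state graph.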
private getGridItemType(gridItem: GridItem): string {
if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.ENERGY_USER)
) {
return bioconstants.GRID_ITEM_NAMES.ENERGY_USER;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY)
) {
return bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY)
) {
return bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL)
) {
return bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL;
}
return bioconstants.GRID_ITEM_NAMES.GRID;
}
/**
* A simplified algorithm to (mostly) evenly space out batteries throughout the square town
* Split the town into rows and columns and then place a battery in the center of each cell
* TODO: have a smart algorithm for placement, see https://github.com/googleinterns/step141-2020/issues/42
*/
private createGridItemPositions(
townSize: TownSize,
numberOfGridItems: number
): ItemPosition[] {
const cols = Math.ceil(numberOfGridItems / townSize.width);
const rows = Math.ceil(numberOfGridItems / cols);
const positions: ItemPosition[] = [];
for (let i = 0; i < numberOfGridItems; i++) {
const newPositionUnverified = {
x: this.roundToGridDistance(
(((i % cols) + 0.5) / cols) * townSize.width
),
y: this.roundToGridDistance(
((Math.floor(i / cols) + 0.5) / rows) * townSize.height
),
};
const newPosition = this.findNearestUnoccupiedPosition(
newPositionUnverified,
townSize
);
positions.push(newPosition);
this.itemInPosition[this.formatItemPosition(newPosition)] = true;
}
return positions;
}
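// Editor's worked example (not part of the original source), assuming a hypothetical
// 4 km x 3 km town and 6 grid items: cols = ceil(6 / 4) = 2 and rows = ceil(6 / 2) = 3, so the
// unrounded centers are x in {1, 3} and y in {0.5, 1.5, 2.5} km; roundToGridDistance() then
// snaps each coordinate down to the GRID_DISTANCES.INCREMENTS_KM grid and
// findNearestUnoccupiedPosition() resolves any collisions the rounding introduces.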
/**
* Find the nearest unoccupied position to {@code pos} by looking in a spiral with pos at its center
* First the space immediately right of pos is checked, then the one above, then to the left, then below, then two spaces to the right, two up, and so on
*/
private findNearestUnoccupiedPosition(
pos: ItemPosition,
townSize: TownSize
): ItemPosition {
let radius = bioconstants.GRID_DISTANCES.INCREMENTS_KM;
let angle = 0;
let outOfBoundsCount = 0;
let xOffset = 0,
yOffset = 0;
let newPos = { x: pos.x + xOffset, y: pos.y + yOffset };
// If {@code outOfBoundsCount} is greater than 3, then the positions above, to the left, to the right, and below
// are all out of bounds. Thus there is nowhere left to place the item
while (
(this.positionOutOfBounds(newPos, townSize) ||
this.positionOccupied(newPos)) &&
outOfBoundsCount <= 3
) | {
if (this.positionOutOfBounds(newPos, townSize)) {
outOfBoundsCount++;
}
switch (angle) {
case 0:
yOffset = 0;
xOffset = radius;
break;
case 90:
xOffset = 0;
yOffset = radius;
break;
case 180:
xOffset = -1 * radius;
yOffset = 0;
break;
case 270:
xOffset = 0;
yOffset = -1 * radius; | conditional_block |
|
biogrid-class.ts | approximately have a maxCapacity of 540,000KJ
private largeBatteries: Battery[];
// A dictionary with the position as its key
// Used to keep track of whether an item is already placed in a position
private itemInPosition: { [positionString: string]: boolean } = {};
// All details for the source of energy
private solarPanels: EnergySource[];
// Holds the efficiency of the grid
private efficiency: number;
constructor(private town: Town, opts: BiogridOptions) {
const todayMidnight = new Date();
todayMidnight.setHours(0);
this.startDate = opts.startDate || todayMidnight;
// Batteries
const smallBatteryPositions = this.createGridItemPositions(
town.getTownSize(),
opts.numberOfSmallBatteryCells
);
const largeBatteryPositions = this.createGridItemPositions(
town.getTownSize(),
opts.numberOfLargeBatteryCells
);
this.smallBatteries = this.createBatteries(
smallBatteryPositions,
bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
);
this.largeBatteries = this.createBatteries(
largeBatteryPositions,
bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
);
// Energy Source
const solarPanelPositions = this.createGridItemPositions(
town.getTownSize(),
opts.numberOfSolarPanels
);
this.solarPanels = this.createSolarPanels(solarPanelPositions);
this.state = new BiogridState(this.createGridItems(), town.getTownSize());
// Set the efficiency to 0 at the beginning
this.efficiency = 0;
}
private createGridItems(): GridItem[] {
return [
...this.smallBatteries,
...this.largeBatteries,
...this.town.getEnergyUsers(),
...this.solarPanels,
];
}
getTownSize() {
return this.town.getTownSize();
}
getSystemState() {
return this.state;
}
getEfficiency() {
// Round off the efficiency to 3 dps
return this.efficiency.toFixed(3);
}
getJsonGraphDetails() |
private createBatteries(
positions: ItemPosition[],
gridItemName: string
): Battery[] {
const batteryResistance =
gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
? bioconstants.RESISTANCE.LARGE_BATTERY
: bioconstants.RESISTANCE.SMALL_BATTERY;
const maxCapacity =
gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
? bioconstants.LARGE_BATTERY.MAX_CAPACITY
: bioconstants.SMALL_BATTERY.MAX_CAPACITY;
const initEnergy =
gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
? bioconstants.LARGE_BATTERY.DEFAULT_START_ENERGY
: bioconstants.SMALL_BATTERY.DEFAULT_START_ENERGY;
return positions.map(
(position, index) =>
new BioBattery({
x: position.x,
y: position.y,
gridItemName: `${gridItemName}-${index}`,
gridItemResistance: batteryResistance,
energyInKiloWattHour: initEnergy,
maxCapacity,
} as BatteryParams)
);
}
/**
* This method creates a list of solar panels placed at the given positions
* @param positions holds the positions where the solar panels are going to be placed
*/
// TODO pass a list of equal length to hold the area for the solar panels
private createSolarPanels(positions: ItemPosition[]): EnergySource[] {
return positions.map(
(position, index) =>
new SolarPanel({
x: position.x,
y: position.y,
efficiency: 0.75,
areaSquareMeters: bioconstants.SOLAR_PANEL.AREA,
gridItemName: `${bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL}-${index}`,
date: this.startDate,
} as SolarPanelParams)
);
}
/**
* Drain the energy users according to the time of day
*/
updateEnergyUsage(date: Date) {
this.town.getEnergyUsers().forEach((energyUser) => {
energyUser.decreaseEnergyAccordingToTimeOfDay(date);
});
}
/**
* This method takes the results of the brain and changes the state graph as suggested by the brain.
* The results of the brain are in the form of an object of key:value pairs, with the receiver gridItemName as key and the supplier gridItemName as value
* @param action holds the results from the brain
* @returns the current state with a new graph which includes the changes that were suggested by the brain
*/
takeAction(action: GridAction) {
const powerEdges: { v: string; w: string; power: Power }[] = [];
// Set new efficiency
this.efficiency = action.getEfficiency();
// RETURN a new BiogridState
const allSupplyingPaths = action.getSupplyingPaths();
this.state.resetPowerOnEdges();
const clonedGraph = this.state.cloneStateGraph();
for (const supplyPath in allSupplyingPaths) {
const oldGridItem = this.state.getGridItem(supplyPath);
// take energy from the supplying grid item and transfer it to the energy user
const supplyingGridItem = this.state.getGridItem(
allSupplyingPaths[supplyPath]
);
const typeOldGridItem = this.getGridItemType(oldGridItem);
const energyUser = oldGridItem as Building | BioBattery;
const energyUserReq =
energyUser.getMaxCapacity() - energyUser.getEnergyInKilowattHour();
const typeSupplyingGridItem = this.getGridItemType(supplyingGridItem);
if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.ENERGY_USER) {
if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ||
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
) {
const battery = supplyingGridItem as BioBattery;
battery.supplyPower(energyUserReq);
clonedGraph.setNode(battery.gridItemName, battery);
} else if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
} else {
continue;
}
(energyUser as Building).increaseEnergy(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
} else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) {
if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
const battery = supplyingGridItem as BioBattery;
battery.supplyPower(energyUserReq);
clonedGraph.setNode(battery.gridItemName, battery);
} else if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
} else {
continue;
}
(energyUser as BioBattery).startCharging(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
} else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
} else {
continue;
}
(energyUser as BioBattery).startCharging(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
}
powerEdges.push({
v: supplyingGridItem.gridItemName,
w: energyUser.gridItemName,
// Convert kilowatt-hours into kilowatts
power: energyUserReq / bioconstants.TIME.DISCRETE_UNIT_HOURS,
});
}
this.state.setnewStateGraph(clonedGraph);
powerEdges.forEach((powerEdge) => {
this.state.setPowerBetweenNodes(
powerEdge.v,
powerEdge.w,
powerEdge.power
);
});
return this.state;
}
private getGridItemType(gridItem: GridItem): string {
if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.ENERGY_USER)
) {
return bioconstants.GRID_ITEM_NAMES.ENERGY_USER;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY)
) {
return bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY)
) {
return bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL)
) {
return bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL;
}
return bioconstants | {
return this.state.getJsonGraph();
} | identifier_body |
biogrid-class.ts | , opts: BiogridOptions) {
const todayMidnight = new Date();
todayMidnight.setHours(0);
this.startDate = opts.startDate || todayMidnight;
// Batteries
const smallBatteryPositions = this.createGridItemPositions(
town.getTownSize(),
opts.numberOfSmallBatteryCells
);
const largeBatteryPositions = this.createGridItemPositions(
town.getTownSize(),
opts.numberOfLargeBatteryCells
);
this.smallBatteries = this.createBatteries(
smallBatteryPositions,
bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
);
this.largeBatteries = this.createBatteries(
largeBatteryPositions,
bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
);
// Energy Source
const solarPanelPositions = this.createGridItemPositions(
town.getTownSize(),
opts.numberOfSolarPanels
);
this.solarPanels = this.createSolarPanels(solarPanelPositions);
this.state = new BiogridState(this.createGridItems(), town.getTownSize());
// Set the efficiency to 0 at the beginning
this.efficiency = 0;
}
private createGridItems(): GridItem[] {
return [
...this.smallBatteries,
...this.largeBatteries,
...this.town.getEnergyUsers(),
...this.solarPanels,
];
}
getTownSize() {
return this.town.getTownSize();
}
getSystemState() {
return this.state;
}
getEfficiency() {
// Round off the efficiency to 3 dps
return this.efficiency.toFixed(3);
}
getJsonGraphDetails() {
return this.state.getJsonGraph();
}
private createBatteries(
positions: ItemPosition[],
gridItemName: string
): Battery[] {
const batteryResistance =
gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
? bioconstants.RESISTANCE.LARGE_BATTERY
: bioconstants.RESISTANCE.SMALL_BATTERY;
const maxCapacity =
gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
? bioconstants.LARGE_BATTERY.MAX_CAPACITY
: bioconstants.SMALL_BATTERY.MAX_CAPACITY;
const initEnergy =
gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
? bioconstants.LARGE_BATTERY.DEFAULT_START_ENERGY
: bioconstants.SMALL_BATTERY.DEFAULT_START_ENERGY;
return positions.map(
(position, index) =>
new BioBattery({
x: position.x,
y: position.y,
gridItemName: `${gridItemName}-${index}`,
gridItemResistance: batteryResistance,
energyInKiloWattHour: initEnergy,
maxCapacity,
} as BatteryParams)
);
}
/**
* This method creates a list of solar panels placed at the given positions
* @param positions holds the positions where the solar panels are going to be placed
*/
// TODO pass a list of equal length to hold the area for the solar panels
private createSolarPanels(positions: ItemPosition[]): EnergySource[] {
return positions.map(
(position, index) =>
new SolarPanel({
x: position.x,
y: position.y,
efficiency: 0.75,
areaSquareMeters: bioconstants.SOLAR_PANEL.AREA,
gridItemName: `${bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL}-${index}`,
date: this.startDate,
} as SolarPanelParams)
);
}
/**
* Drain the energy users according to the time of day
*/
updateEnergyUsage(date: Date) {
this.town.getEnergyUsers().forEach((energyUser) => {
energyUser.decreaseEnergyAccordingToTimeOfDay(date);
});
}
/**
* This method takes the results of the brain and changes the state graph as suggested by the brain.
* The results of the brain are in the form of an object of key:value pairs, with the receiver gridItemName as key and the supplier gridItemName as value
* @param action holds the results from the brain
* @returns the current state with a new graph which includes the changes that were suggested by the brain
*/
takeAction(action: GridAction) {
const powerEdges: { v: string; w: string; power: Power }[] = [];
// Set new efficiency
this.efficiency = action.getEfficiency();
// RETURN a new BiogridState
const allSupplyingPaths = action.getSupplyingPaths();
this.state.resetPowerOnEdges();
const clonedGraph = this.state.cloneStateGraph();
for (const supplyPath in allSupplyingPaths) {
const oldGridItem = this.state.getGridItem(supplyPath);
// take energy from the supplying grid item and transfer it to the energy user
const supplyingGridItem = this.state.getGridItem(
allSupplyingPaths[supplyPath]
);
const typeOldGridItem = this.getGridItemType(oldGridItem);
const energyUser = oldGridItem as Building | BioBattery;
const energyUserReq =
energyUser.getMaxCapacity() - energyUser.getEnergyInKilowattHour();
const typeSupplyingGridItem = this.getGridItemType(supplyingGridItem);
if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.ENERGY_USER) {
if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ||
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
) {
const battery = supplyingGridItem as BioBattery;
battery.supplyPower(energyUserReq);
clonedGraph.setNode(battery.gridItemName, battery);
} else if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
} else {
continue;
}
(energyUser as Building).increaseEnergy(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
} else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) {
if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
const battery = supplyingGridItem as BioBattery;
battery.supplyPower(energyUserReq);
clonedGraph.setNode(battery.gridItemName, battery);
} else if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
} else {
continue;
}
(energyUser as BioBattery).startCharging(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
} else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
} else {
continue;
}
(energyUser as BioBattery).startCharging(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
}
powerEdges.push({
v: supplyingGridItem.gridItemName,
w: energyUser.gridItemName,
// Convert kilowatt-hours into kilowatts
power: energyUserReq / bioconstants.TIME.DISCRETE_UNIT_HOURS,
});
}
this.state.setnewStateGraph(clonedGraph);
powerEdges.forEach((powerEdge) => {
this.state.setPowerBetweenNodes(
powerEdge.v,
powerEdge.w,
powerEdge.power
);
});
return this.state;
}
private getGridItemType(gridItem: GridItem): string {
if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.ENERGY_USER)
) {
return bioconstants.GRID_ITEM_NAMES.ENERGY_USER;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY)
) {
return bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY)
) {
return bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL)
) {
return bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL;
}
return bioconstants.GRID_ITEM_NAMES.GRID;
}
/**
* A simplified algorithm to (mostly) evenly space out batteries throughout the square town
* Split the town into rows and columns and then place a battery in the center of each cell
* TODO: have a smart algorithm for placement, see https://github.com/googleinterns/step141-2020/issues/42
*/
private createGridItemPositions( | townSize: TownSize,
numberOfGridItems: number
): ItemPosition[] { | random_line_split |
|
biogrid-class.ts | );
}
/**
* Drain the energy users according to the time of day
*/
updateEnergyUsage(date: Date) {
this.town.getEnergyUsers().forEach((energyUser) => {
energyUser.decreaseEnergyAccordingToTimeOfDay(date);
});
}
/**
* This method takes the results of the brain and changes the state graph as suggested by the brain.
* The results of the brain are in the form of an object of key:value pairs, with the receiver gridItemName as key and the supplier gridItemName as value
* @param action holds the results from the brain
* @returns the current state with a new graph which includes the changes that were suggested by the brain
*/
takeAction(action: GridAction) {
const powerEdges: { v: string; w: string; power: Power }[] = [];
// Set new efficiency
this.efficiency = action.getEfficiency();
// RETURN a new BiogridState
const allSupplyingPaths = action.getSupplyingPaths();
this.state.resetPowerOnEdges();
const clonedGraph = this.state.cloneStateGraph();
for (const supplyPath in allSupplyingPaths) {
const oldGridItem = this.state.getGridItem(supplyPath);
// take energy from the supplying grid item and transfer it to the energy user
const supplyingGridItem = this.state.getGridItem(
allSupplyingPaths[supplyPath]
);
const typeOldGridItem = this.getGridItemType(oldGridItem);
const energyUser = oldGridItem as Building | BioBattery;
const energyUserReq =
energyUser.getMaxCapacity() - energyUser.getEnergyInKilowattHour();
const typeSupplyingGridItem = this.getGridItemType(supplyingGridItem);
if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.ENERGY_USER) {
if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ||
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
) {
const battery = supplyingGridItem as BioBattery;
battery.supplyPower(energyUserReq);
clonedGraph.setNode(battery.gridItemName, battery);
} else if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
} else {
continue;
}
(energyUser as Building).increaseEnergy(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
} else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) {
if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
const battery = supplyingGridItem as BioBattery;
battery.supplyPower(energyUserReq);
clonedGraph.setNode(battery.gridItemName, battery);
} else if (
typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
} else {
continue;
}
(energyUser as BioBattery).startCharging(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
} else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) {
const solarpanel = supplyingGridItem as SolarPanel;
solarpanel.supplyPower(energyUserReq);
} else {
continue;
}
(energyUser as BioBattery).startCharging(energyUserReq);
clonedGraph.setNode(energyUser.gridItemName, energyUser);
}
powerEdges.push({
v: supplyingGridItem.gridItemName,
w: energyUser.gridItemName,
// Convert kilowatt-hours into kilowatts
power: energyUserReq / bioconstants.TIME.DISCRETE_UNIT_HOURS,
});
}
this.state.setnewStateGraph(clonedGraph);
powerEdges.forEach((powerEdge) => {
this.state.setPowerBetweenNodes(
powerEdge.v,
powerEdge.w,
powerEdge.power
);
});
return this.state;
}
private getGridItemType(gridItem: GridItem): string {
if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.ENERGY_USER)
) {
return bioconstants.GRID_ITEM_NAMES.ENERGY_USER;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY)
) {
return bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY)
) {
return bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY;
} else if (
gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL)
) {
return bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL;
}
return bioconstants.GRID_ITEM_NAMES.GRID;
}
/**
* A simplified algorithm to (mostly) evenly space out batteries throughout the square town
* Split the town into rows and columns and then place a battery in the center of each cell
* TODO: have a smart algorithm for placement, see https://github.com/googleinterns/step141-2020/issues/42
*/
private createGridItemPositions(
townSize: TownSize,
numberOfGridItems: number
): ItemPosition[] {
const cols = Math.ceil(numberOfGridItems / townSize.width);
const rows = Math.ceil(numberOfGridItems / cols);
const positions: ItemPosition[] = [];
for (let i = 0; i < numberOfGridItems; i++) {
const newPositionUnverified = {
x: this.roundToGridDistance(
(((i % cols) + 0.5) / cols) * townSize.width
),
y: this.roundToGridDistance(
((Math.floor(i / cols) + 0.5) / rows) * townSize.height
),
};
const newPosition = this.findNearestUnoccupiedPosition(
newPositionUnverified,
townSize
);
positions.push(newPosition);
this.itemInPosition[this.formatItemPosition(newPosition)] = true;
}
return positions;
}
/**
* Find the nearest unoccupied position to {@code pos} by looking in a spiral with pos at its center
* First the space immediately right of pos is checked, then the one above, then to the left, then below, then two spaces to the right, two up, and so on
*/
private findNearestUnoccupiedPosition(
pos: ItemPosition,
townSize: TownSize
): ItemPosition {
let radius = bioconstants.GRID_DISTANCES.INCREMENTS_KM;
let angle = 0;
let outOfBoundsCount = 0;
let xOffset = 0,
yOffset = 0;
let newPos = { x: pos.x + xOffset, y: pos.y + yOffset };
// If {@code outOfBoundsCount} is greater than 3, then the positions above, to the left, to the right, and below
// are all out of bounds. Thus there is nowhere left to place the item
while (
(this.positionOutOfBounds(newPos, townSize) ||
this.positionOccupied(newPos)) &&
outOfBoundsCount <= 3
) {
if (this.positionOutOfBounds(newPos, townSize)) {
outOfBoundsCount++;
}
switch (angle) {
case 0:
yOffset = 0;
xOffset = radius;
break;
case 90:
xOffset = 0;
yOffset = radius;
break;
case 180:
xOffset = -1 * radius;
yOffset = 0;
break;
case 270:
xOffset = 0;
yOffset = -1 * radius;
break;
}
newPos = { x: pos.x + xOffset, y: pos.y + yOffset };
// Increment the angle by 90 degrees
angle = angle + 90;
if (angle === 360) {
radius += bioconstants.GRID_DISTANCES.INCREMENTS_KM;
// Reset the angle
angle = 0;
}
}
if (outOfBoundsCount > 3) {
throw new Error(
`There are too many items on the grid. New items could not be placed with a minimum distance of ${bioconstants.GRID_DISTANCES.INCREMENTS_KM} km apart`
);
}
return newPos;
}
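// Editor's trace (not part of the original source), assuming a grid increment of 1 km: from an
// occupied pos the loop above probes offsets (1, 0), (0, 1), (-1, 0), (0, -1), then (2, 0),
// (0, 2), (-2, 0), (0, -2), and so on, returning the first free in-bounds position and throwing
// once four probed positions in total have fallen outside the town.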
private positionOutOfBounds(pos: ItemPosition, townSize: TownSize): boolean {
return pos.x > townSize.width || pos.y > townSize.height;
}
private roundToGridDistance(distance: Distance): Distance {
return (
Math.floor(distance / bioconstants.GRID_DISTANCES.INCREMENTS_KM) *
bioconstants.GRID_DISTANCES.INCREMENTS_KM
);
}
private | positionOccupied | identifier_name |
|
sync.rs | unwrap()
}
fn buf_to_state(buf: &[u8]) -> Result<Engine, serde_json::Error> {
serde_json::from_slice(buf)
}
/// Stores state needed by the container to perform synchronization.
pub struct SyncStore {
page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
transaction_pending: bool,
buffer: BufferIdentifier,
}
impl SyncStore {
/// - `page` is a reference to the Ledger page to store data under.
/// - `key` is the key the `Syncable` managed by this `SyncStore` will be stored under.
/// This example only supports storing things under a single key per page.
/// - `updates` is a channel to a `SyncUpdater` that will handle events.
///
/// Returns a sync store and schedules the loading of initial
/// state and subscribes to state updates for this document.
pub fn new(
mut page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
) -> SyncStore {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let watcher_client = PageWatcher_Client::from_handle(s1.into_handle());
let watcher_client_ptr =
::fidl::InterfacePtr { inner: watcher_client, version: PageWatcher_Metadata::VERSION };
let watcher = PageWatcherServer { updates: updates.clone(), buffer: buffer.clone() };
let _ = fidl::Server::new(watcher, s2).spawn();
let (mut snap, snap_request) = PageSnapshot_new_pair();
page.get_snapshot(snap_request, Some(key.clone()), Some(watcher_client_ptr))
.with(ledger_crash_callback);
let initial_state_chan = updates.clone();
let initial_buffer = buffer.clone();
snap.get(key.clone()).with(move |raw_res| {
match raw_res.map(|res| ledger::value_result(res)) {
Ok(Ok(Some(buf))) => {
initial_state_chan
.send(SyncMsg::NewState {
buffer: initial_buffer,
new_buf: buf,
done: None,
})
.unwrap();
}
Ok(Ok(None)) => (), // No initial state saved yet
Err(err) => error!("FIDL failed on initial response: {:?}", err),
Ok(Err(err)) => error!("Ledger failed to retrieve key: {:?}", err),
}
});
SyncStore { page, key, updates, buffer, transaction_pending: false }
}
/// Called whenever this app changed its own state and would like to
/// persist the changes to the ledger. Changes can't be committed
/// immediately since we have to wait for PageWatcher changes that may not
/// have arrived yet.
pub fn state_changed(&mut self) {
if !self.transaction_pending {
self.transaction_pending = true;
let ready_future = self.page.start_transaction();
let done_chan = self.updates.clone();
let buffer = self.buffer.clone();
ready_future.with(move |res| match res {
Ok(ledger::OK) => {
done_chan.send(SyncMsg::TransactionReady { buffer }).unwrap();
}
Ok(err_status) => error!("Ledger failed to start transaction: {:?}", err_status),
Err(err) => error!("FIDL failed on starting transaction: {:?}", err),
});
}
}
/// Should be called in SyncContainer::transaction_ready to persist the current state.
pub fn commit_transaction(&mut self, state: &Engine) {
assert!(self.transaction_pending, "must call state_changed (and wait) before commit");
self.page.put(self.key.clone(), state_to_buf(state)).with(ledger_crash_callback);
self.page.commit().with(ledger_crash_callback);
self.transaction_pending = false;
}
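// Editor's note (not part of the original xi-editor source): the write path described by the
// comments above is state_changed() -> Page.start_transaction() -> SyncMsg::TransactionReady on
// the updater channel -> commit_transaction(), which serializes the Engine via state_to_buf()
// and issues Page.put() followed by Page.commit(); transaction_pending guards against starting
// a second transaction while one is in flight.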
}
/// All the different asynchronous events the updater thread needs to listen for and act on
pub enum SyncMsg {
NewState {
buffer: BufferIdentifier,
new_buf: Vec<u8>,
done: Option<Promise<Option<PageSnapshot_Server>, fidl::Error>>,
},
TransactionReady {
buffer: BufferIdentifier,
},
/// Shut down the updater thread
Stop,
}
/// We want to be able to register to receive events from inside the
/// `SyncStore`/`SyncContainer` but from there we don't have access to the
/// Mutex that holds the container, so we give channel Senders to all the
/// futures so that they can all trigger events in one place that does have
/// the right reference.
///
/// Additionally, the individual `Editor`s aren't wrapped in a `Mutex` so we
/// have to hold a `BufferContainerRef` and use `BufferIdentifier`s with one
/// `SyncUpdater` for all buffers.
pub struct SyncUpdater<W: Write> {
container_ref: BufferContainerRef<W>,
chan: Receiver<SyncMsg>,
}
impl<W: Write + Send + 'static> SyncUpdater<W> {
pub fn new(container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>) -> SyncUpdater<W> |
/// Run this in a thread, it will return when it encounters an error
/// reading the channel or when the `Stop` message is received.
pub fn work(&self) -> Result<(), RecvError> {
loop {
let msg = self.chan.recv()?;
match msg {
SyncMsg::Stop => return Ok(()),
SyncMsg::TransactionReady { buffer } => {
let mut container = self.container_ref.lock();
// if the buffer was closed, hopefully the page connection was as well, which I hope aborts transactions
if let Some(mut editor) = container.editor_for_buffer_mut(&buffer) {
editor.transaction_ready();
}
}
SyncMsg::NewState { new_buf, done, buffer } => {
let mut container = self.container_ref.lock();
match (container.editor_for_buffer_mut(&buffer), buf_to_state(&new_buf)) {
(Some(mut editor), Ok(new_state)) => {
editor.merge_new_state(new_state);
if let Some(promise) = done {
promise.set_ok(None);
}
}
(None, _) => (), // buffer was closed
(_, Err(err)) => error!("Ledger was set to invalid state: {:?}", err),
}
}
}
}
}
}
struct PageWatcherServer {
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
}
impl PageWatcher for PageWatcherServer {
fn on_change(
&mut self,
page_change: PageChange,
result_state: ResultState,
) -> Future<Option<PageSnapshot_Server>, fidl::Error> {
let (future, done) = Future::make_promise();
let value_opt = page_change.changes.get(0).and_then(|c| c.value.as_ref());
if let (ledger::RESULT_COMPLETED, Some(value_vmo)) = (result_state, value_opt) {
let new_buf = read_entire_vmo(value_vmo).expect("failed to read key Vmo");
self.updates
.send(SyncMsg::NewState { buffer: self.buffer.clone(), new_buf, done: Some(done) })
.unwrap();
} else {
error!("Xi state corrupted, should have one key but has multiple.");
// I don't think this should be a FIDL-level error, so set okay
done.set_ok(None);
}
future
}
}
impl PageWatcher_Stub for PageWatcherServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(PageWatcherServer: PageWatcher_Stub);
// ============= Conflict resolution
pub fn start_conflict_resolver_factory(ledger: &mut Ledger_Proxy, key: Vec<u8>) {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let resolver_client = ConflictResolverFactory_Client::from_handle(s1.into_handle());
let resolver_client_ptr = ::fidl::InterfacePtr {
inner: resolver_client,
version: ConflictResolverFactory_Metadata::VERSION,
};
let _ = fidl::Server::new(ConflictResolverFactoryServer { key }, s2).spawn();
ledger.set_conflict_resolver_factory(Some(resolver_client_ptr)).with(ledger_crash_callback);
}
struct ConflictResolverFactoryServer {
key: Vec<u8>,
}
impl ConflictResolverFactory for ConflictResolverFactoryServer {
fn get_policy(&mut self, _page_id: Vec<u8>) -> Future<MergePolicy, ::fidl::Error> {
Future::done(Ok(MergePolicy_Custom))
}
/// Our resolvers are the same for every page
fn new_conflict_resolver(&mut self, _page_id: Vec<u8>, resolver: ConflictResolver_Server) {
let _ = fidl::Server::new(
ConflictResolverServer { key: self.key.clone() },
resolver.into_channel(),
)
.spawn();
}
}
impl ConflictResolverFactory_Stub for ConflictResolverFactoryServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(ConflictResolverFactoryServer: ConflictResolverFactory_Stub);
fn state_from_snapshot<F>(
snapshot: ::fidl::InterfacePtr<PageSnapshot_Client>,
key: Vec<u8>,
done: F,
) where
F: Send + FnOnce(Result<Option<Engine>, ()>) + 'static,
{
assert_eq!(PageSnapshot_Metadata::VERSION, snapshot.version);
let mut snapshot | {
SyncUpdater { container_ref, chan }
} | identifier_body |
sync.rs | ).unwrap()
}
fn buf_to_state(buf: &[u8]) -> Result<Engine, serde_json::Error> {
serde_json::from_slice(buf)
}
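// Note (added): `buf_to_state` appears to be the inverse of `state_to_buf` above --
// the `Engine` state is stored under the ledger key as a single JSON blob.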
/// Stores state needed by the container to perform synchronization.
pub struct SyncStore {
page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
transaction_pending: bool,
buffer: BufferIdentifier,
}
impl SyncStore {
/// - `page` is a reference to the Ledger page to store data under.
/// - `key` is the key the `Syncable` managed by this `SyncStore` will be stored under.
/// This example only supports storing things under a single key per page.
/// - `updates` is a channel to a `SyncUpdater` that will handle events.
///
/// Returns a sync store and schedules the loading of initial
/// state and subscribes to state updates for this document.
pub fn new(
mut page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
) -> SyncStore {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let watcher_client = PageWatcher_Client::from_handle(s1.into_handle());
let watcher_client_ptr =
::fidl::InterfacePtr { inner: watcher_client, version: PageWatcher_Metadata::VERSION };
let watcher = PageWatcherServer { updates: updates.clone(), buffer: buffer.clone() };
let _ = fidl::Server::new(watcher, s2).spawn();
let (mut snap, snap_request) = PageSnapshot_new_pair();
page.get_snapshot(snap_request, Some(key.clone()), Some(watcher_client_ptr))
.with(ledger_crash_callback);
let initial_state_chan = updates.clone();
let initial_buffer = buffer.clone();
snap.get(key.clone()).with(move |raw_res| {
match raw_res.map(|res| ledger::value_result(res)) {
Ok(Ok(Some(buf))) => {
initial_state_chan
.send(SyncMsg::NewState {
buffer: initial_buffer,
new_buf: buf,
done: None,
})
.unwrap();
}
Ok(Ok(None)) => (), // No initial state saved yet
Err(err) => error!("FIDL failed on initial response: {:?}", err),
Ok(Err(err)) => error!("Ledger failed to retrieve key: {:?}", err),
}
});
SyncStore { page, key, updates, buffer, transaction_pending: false }
}
/// Called whenever this app changed its own state and would like to
/// persist the changes to the ledger. Changes can't be committed
/// immediately since we have to wait for PageWatcher changes that may not
/// have arrived yet.
pub fn state_changed(&mut self) {
if !self.transaction_pending {
self.transaction_pending = true;
let ready_future = self.page.start_transaction();
let done_chan = self.updates.clone();
let buffer = self.buffer.clone();
ready_future.with(move |res| match res {
Ok(ledger::OK) => {
done_chan.send(SyncMsg::TransactionReady { buffer }).unwrap();
}
Ok(err_status) => error!("Ledger failed to start transaction: {:?}", err_status),
Err(err) => error!("FIDL failed on starting transaction: {:?}", err),
});
}
}
/// Should be called in SyncContainer::transaction_ready to persist the current state.
pub fn commit_transaction(&mut self, state: &Engine) {
assert!(self.transaction_pending, "must call state_changed (and wait) before commit");
self.page.put(self.key.clone(), state_to_buf(state)).with(ledger_crash_callback);
self.page.commit().with(ledger_crash_callback);
self.transaction_pending = false;
}
}
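// Sketch of the intended flow (caller names are illustrative, not defined here):
//   1. sync_store.state_changed()           -- requests a ledger transaction
//   2. SyncMsg::TransactionReady is delivered to the SyncUpdater thread, which
//      calls transaction_ready() on the owning editor/container
//   3. that callback invokes sync_store.commit_transaction(&state) to put() and
//      commit() the current state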
/// All the different asynchronous events the updater thread needs to listen for and act on
pub enum SyncMsg {
NewState {
buffer: BufferIdentifier,
new_buf: Vec<u8>,
done: Option<Promise<Option<PageSnapshot_Server>, fidl::Error>>,
},
TransactionReady {
buffer: BufferIdentifier,
},
/// Shut down the updater thread
Stop,
}
/// We want to be able to register to receive events from inside the
/// `SyncStore`/`SyncContainer` but from there we don't have access to the
/// Mutex that holds the container, so we give channel Senders to all the
/// futures so that they can all trigger events in one place that does have
/// the right reference.
///
/// Additionally, the individual `Editor`s aren't wrapped in a `Mutex` so we
/// have to hold a `BufferContainerRef` and use `BufferIdentifier`s with one
/// `SyncUpdater` for all buffers.
pub struct SyncUpdater<W: Write> {
container_ref: BufferContainerRef<W>,
chan: Receiver<SyncMsg>,
}
impl<W: Write + Send + 'static> SyncUpdater<W> {
pub fn new(container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>) -> SyncUpdater<W> {
SyncUpdater { container_ref, chan }
}
/// Run this in a thread; it will return when it encounters an error
/// reading the channel or when the `Stop` message is received.
pub fn work(&self) -> Result<(), RecvError> {
loop {
let msg = self.chan.recv()?;
match msg {
SyncMsg::Stop => return Ok(()),
SyncMsg::TransactionReady { buffer } => {
let mut container = self.container_ref.lock();
// if the buffer was closed, hopefully the page connection was as well, which I hope aborts transactions
if let Some(mut editor) = container.editor_for_buffer_mut(&buffer) {
editor.transaction_ready();
}
}
SyncMsg::NewState { new_buf, done, buffer } => {
let mut container = self.container_ref.lock();
match (container.editor_for_buffer_mut(&buffer), buf_to_state(&new_buf)) {
(Some(mut editor), Ok(new_state)) => {
editor.merge_new_state(new_state);
if let Some(promise) = done {
promise.set_ok(None);
}
}
(None, _) => (), // buffer was closed
(_, Err(err)) => error!("Ledger was set to invalid state: {:?}", err),
}
}
}
}
}
}
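// Typical wiring (a sketch; assumes the Sender/Receiver above are std::sync::mpsc):
//     let (tx, rx) = std::sync::mpsc::channel();
//     let updater = SyncUpdater::new(container_ref.clone(), rx);
//     std::thread::spawn(move || updater.work().expect("sync channel closed"));
//     // `tx` is then cloned into each SyncStore / PageWatcherServer as `updates`.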
struct PageWatcherServer {
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
}
impl PageWatcher for PageWatcherServer {
fn on_change(
&mut self,
page_change: PageChange,
result_state: ResultState,
) -> Future<Option<PageSnapshot_Server>, fidl::Error> {
let (future, done) = Future::make_promise();
let value_opt = page_change.changes.get(0).and_then(|c| c.value.as_ref());
if let (ledger::RESULT_COMPLETED, Some(value_vmo)) = (result_state, value_opt) {
let new_buf = read_entire_vmo(value_vmo).expect("failed to read key Vmo");
self.updates
.send(SyncMsg::NewState { buffer: self.buffer.clone(), new_buf, done: Some(done) })
.unwrap();
} else {
error!("Xi state corrupted, should have one key but has multiple.");
// I don't think this should be a FIDL-level error, so set okay
done.set_ok(None);
}
future
}
}
impl PageWatcher_Stub for PageWatcherServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(PageWatcherServer: PageWatcher_Stub);
// ============= Conflict resolution
pub fn | (ledger: &mut Ledger_Proxy, key: Vec<u8>) {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let resolver_client = ConflictResolverFactory_Client::from_handle(s1.into_handle());
let resolver_client_ptr = ::fidl::InterfacePtr {
inner: resolver_client,
version: ConflictResolverFactory_Metadata::VERSION,
};
let _ = fidl::Server::new(ConflictResolverFactoryServer { key }, s2).spawn();
ledger.set_conflict_resolver_factory(Some(resolver_client_ptr)).with(ledger_crash_callback);
}
struct ConflictResolverFactoryServer {
key: Vec<u8>,
}
impl ConflictResolverFactory for ConflictResolverFactoryServer {
fn get_policy(&mut self, _page_id: Vec<u8>) -> Future<MergePolicy, ::fidl::Error> {
Future::done(Ok(MergePolicy_Custom))
}
/// Our resolvers are the same for every page
fn new_conflict_resolver(&mut self, _page_id: Vec<u8>, resolver: ConflictResolver_Server) {
let _ = fidl::Server::new(
ConflictResolverServer { key: self.key.clone() },
resolver.into_channel(),
)
.spawn();
}
}
impl ConflictResolverFactory_Stub for ConflictResolverFactoryServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(ConflictResolverFactoryServer: ConflictResolverFactory_Stub);
fn state_from_snapshot<F>(
snapshot: ::fidl::InterfacePtr<PageSnapshot_Client>,
key: Vec<u8>,
done: F,
) where
F: Send + FnOnce(Result<Option<Engine>, ()>) + 'static,
{
assert_eq!(PageSnapshot_Metadata::VERSION, snapshot.version);
let mut snapshot | start_conflict_resolver_factory | identifier_name |
sync.rs | ).unwrap()
}
fn buf_to_state(buf: &[u8]) -> Result<Engine, serde_json::Error> {
serde_json::from_slice(buf)
}
/// Stores state needed by the container to perform synchronization.
pub struct SyncStore {
page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
transaction_pending: bool,
buffer: BufferIdentifier,
}
impl SyncStore {
/// - `page` is a reference to the Ledger page to store data under.
/// - `key` is the key the `Syncable` managed by this `SyncStore` will be stored under.
/// This example only supports storing things under a single key per page.
/// - `updates` is a channel to a `SyncUpdater` that will handle events.
///
/// Returns a sync store and schedules the loading of initial
/// state and subscribes to state updates for this document.
pub fn new(
mut page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
) -> SyncStore {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap(); | let watcher = PageWatcherServer { updates: updates.clone(), buffer: buffer.clone() };
let _ = fidl::Server::new(watcher, s2).spawn();
let (mut snap, snap_request) = PageSnapshot_new_pair();
page.get_snapshot(snap_request, Some(key.clone()), Some(watcher_client_ptr))
.with(ledger_crash_callback);
let initial_state_chan = updates.clone();
let initial_buffer = buffer.clone();
snap.get(key.clone()).with(move |raw_res| {
match raw_res.map(|res| ledger::value_result(res)) {
Ok(Ok(Some(buf))) => {
initial_state_chan
.send(SyncMsg::NewState {
buffer: initial_buffer,
new_buf: buf,
done: None,
})
.unwrap();
}
Ok(Ok(None)) => (), // No initial state saved yet
Err(err) => error!("FIDL failed on initial response: {:?}", err),
Ok(Err(err)) => error!("Ledger failed to retrieve key: {:?}", err),
}
});
SyncStore { page, key, updates, buffer, transaction_pending: false }
}
/// Called whenever this app changed its own state and would like to
/// persist the changes to the ledger. Changes can't be committed
/// immediately since we have to wait for PageWatcher changes that may not
/// have arrived yet.
pub fn state_changed(&mut self) {
if !self.transaction_pending {
self.transaction_pending = true;
let ready_future = self.page.start_transaction();
let done_chan = self.updates.clone();
let buffer = self.buffer.clone();
ready_future.with(move |res| match res {
Ok(ledger::OK) => {
done_chan.send(SyncMsg::TransactionReady { buffer }).unwrap();
}
Ok(err_status) => error!("Ledger failed to start transaction: {:?}", err_status),
Err(err) => error!("FIDL failed on starting transaction: {:?}", err),
});
}
}
/// Should be called in SyncContainer::transaction_ready to persist the current state.
pub fn commit_transaction(&mut self, state: &Engine) {
assert!(self.transaction_pending, "must call state_changed (and wait) before commit");
self.page.put(self.key.clone(), state_to_buf(state)).with(ledger_crash_callback);
self.page.commit().with(ledger_crash_callback);
self.transaction_pending = false;
}
}
/// All the different asynchronous events the updater thread needs to listen for and act on
pub enum SyncMsg {
NewState {
buffer: BufferIdentifier,
new_buf: Vec<u8>,
done: Option<Promise<Option<PageSnapshot_Server>, fidl::Error>>,
},
TransactionReady {
buffer: BufferIdentifier,
},
/// Shut down the updater thread
Stop,
}
/// We want to be able to register to receive events from inside the
/// `SyncStore`/`SyncContainer` but from there we don't have access to the
/// Mutex that holds the container, so we give channel Senders to all the
/// futures so that they can all trigger events in one place that does have
/// the right reference.
///
/// Additionally, the individual `Editor`s aren't wrapped in a `Mutex` so we
/// have to hold a `BufferContainerRef` and use `BufferIdentifier`s with one
/// `SyncUpdater` for all buffers.
pub struct SyncUpdater<W: Write> {
container_ref: BufferContainerRef<W>,
chan: Receiver<SyncMsg>,
}
impl<W: Write + Send + 'static> SyncUpdater<W> {
pub fn new(container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>) -> SyncUpdater<W> {
SyncUpdater { container_ref, chan }
}
/// Run this in a thread; it will return when it encounters an error
/// reading the channel or when the `Stop` message is received.
pub fn work(&self) -> Result<(), RecvError> {
loop {
let msg = self.chan.recv()?;
match msg {
SyncMsg::Stop => return Ok(()),
SyncMsg::TransactionReady { buffer } => {
let mut container = self.container_ref.lock();
// if the buffer was closed, hopefully the page connection was as well, which I hope aborts transactions
if let Some(mut editor) = container.editor_for_buffer_mut(&buffer) {
editor.transaction_ready();
}
}
SyncMsg::NewState { new_buf, done, buffer } => {
let mut container = self.container_ref.lock();
match (container.editor_for_buffer_mut(&buffer), buf_to_state(&new_buf)) {
(Some(mut editor), Ok(new_state)) => {
editor.merge_new_state(new_state);
if let Some(promise) = done {
promise.set_ok(None);
}
}
(None, _) => (), // buffer was closed
(_, Err(err)) => error!("Ledger was set to invalid state: {:?}", err),
}
}
}
}
}
}
struct PageWatcherServer {
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
}
impl PageWatcher for PageWatcherServer {
fn on_change(
&mut self,
page_change: PageChange,
result_state: ResultState,
) -> Future<Option<PageSnapshot_Server>, fidl::Error> {
let (future, done) = Future::make_promise();
let value_opt = page_change.changes.get(0).and_then(|c| c.value.as_ref());
if let (ledger::RESULT_COMPLETED, Some(value_vmo)) = (result_state, value_opt) {
let new_buf = read_entire_vmo(value_vmo).expect("failed to read key Vmo");
self.updates
.send(SyncMsg::NewState { buffer: self.buffer.clone(), new_buf, done: Some(done) })
.unwrap();
} else {
error!("Xi state corrupted, should have one key but has multiple.");
// I don't think this should be a FIDL-level error, so set okay
done.set_ok(None);
}
future
}
}
impl PageWatcher_Stub for PageWatcherServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(PageWatcherServer: PageWatcher_Stub);
// ============= Conflict resolution
pub fn start_conflict_resolver_factory(ledger: &mut Ledger_Proxy, key: Vec<u8>) {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let resolver_client = ConflictResolverFactory_Client::from_handle(s1.into_handle());
let resolver_client_ptr = ::fidl::InterfacePtr {
inner: resolver_client,
version: ConflictResolverFactory_Metadata::VERSION,
};
let _ = fidl::Server::new(ConflictResolverFactoryServer { key }, s2).spawn();
ledger.set_conflict_resolver_factory(Some(resolver_client_ptr)).with(ledger_crash_callback);
}
struct ConflictResolverFactoryServer {
key: Vec<u8>,
}
impl ConflictResolverFactory for ConflictResolverFactoryServer {
fn get_policy(&mut self, _page_id: Vec<u8>) -> Future<MergePolicy, ::fidl::Error> {
Future::done(Ok(MergePolicy_Custom))
}
/// Our resolvers are the same for every page
fn new_conflict_resolver(&mut self, _page_id: Vec<u8>, resolver: ConflictResolver_Server) {
let _ = fidl::Server::new(
ConflictResolverServer { key: self.key.clone() },
resolver.into_channel(),
)
.spawn();
}
}
impl ConflictResolverFactory_Stub for ConflictResolverFactoryServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(ConflictResolverFactoryServer: ConflictResolverFactory_Stub);
fn state_from_snapshot<F>(
snapshot: ::fidl::InterfacePtr<PageSnapshot_Client>,
key: Vec<u8>,
done: F,
) where
F: Send + FnOnce(Result<Option<Engine>, ()>) + 'static,
{
assert_eq!(PageSnapshot_Metadata::VERSION, snapshot.version);
let mut snapshot_proxy | let watcher_client = PageWatcher_Client::from_handle(s1.into_handle());
let watcher_client_ptr =
::fidl::InterfacePtr { inner: watcher_client, version: PageWatcher_Metadata::VERSION };
| random_line_split |
test_rnn2rnn_power.py | ample("15min").sum().reset_index()
power_15min = power_15min.pivot(index='cid', columns='data_time', values='value')
power_daily = power.set_index("data_time").groupby("cid").resample("1D").sum().reset_index()
power_daily = power_daily.pivot(index='cid', columns='data_time', values='value')
xy_15min = power_15min.values.reshape(62, -1, 4*24) # (62, 1082, 96)
xy_daily = power_daily.values
N_TEST = 30
N_VALID = 2
DEC_LEN = 2
ENC_LEN = 7
drop_before = 1000
starts, ends = F.get_valid_start_end(np.isnan(xy_daily))
corr_7 = F.batch_autocorr(xy_daily, 7, starts, ends, 1.05, use_smooth=False, smooth_offset=None)
corr_14 = F.batch_autocorr(xy_daily, 14, starts, ends, 1.05, use_smooth=False, smooth_offset=None)
corr_365 = F.batch_autocorr(xy_daily, 365, starts, ends, 1.05, use_smooth=True, smooth_offset=2)
xy_daily_auto_corr = np.concatenate([corr_7, corr_14, corr_365], 1)
xy_daily_auto_corr = normalize(xy_daily_auto_corr, 0)[0]
xy_lags = normalize(F.make_lags(xy_daily, [7, 14, 365])[:, :, drop_before:], axis=2)[0].transpose([0, 2, 1])
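# xy_lags carries 7/14/365-day lagged copies of the daily series and
# xy_daily_auto_corr the matching autocorrelations; both are normalized here and
# fed to the encoder/decoder below as numeric covariates.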
weights = (~np.bitwise_or(np.isnan(xy_15min), xy_15min == 0)).astype("float32")[:, drop_before:]
# weights[:, :, np.where((power.columns >= "2020-02-01") & (power.columns < "2020-03-01"), 1, 0)] = 0.01
# weights = weights * xy_mean / xy_mean.mean()
# weights = weights.transpose([0, 2, 1])
xy_cat = np.expand_dims(np.arange(len(weights)), 1)
def get_holiday_features(dts):
select_holidays = ["Spring Festival", "National Day", "Labour Day", "New Year's Day", "Mid-autumn Festival", "Tomb-sweeping Day"]
def _get_holidays(x):
is_holiday, holiday_name = calendar.get_holiday_detail(x)
if holiday_name in select_holidays and is_holiday:
return holiday_name
holidays = pd.get_dummies(pd.Series(dts).apply(lambda x: _get_holidays(x)))
holidays['sick'] = np.where((power_daily.columns >= "2020-02-01") & (power_daily.columns < "2020-03-01"), 1, 0)
holidays.index = dts
return holidays
def holiday_apply(x, holidays, func):
result = pd.DataFrame()
for h in holidays.columns:
result[h] = x.loc[:, holidays[h].values.astype(bool)].agg(func, axis=1).values
return result
holidays = get_holiday_features(power_daily.columns)
xy_holiday_mean = holiday_apply(power_daily, holidays, np.mean).values
xy_holiday_mean = normalize(xy_holiday_mean, 0)[0]
xy_weekday = pd.get_dummies(power_daily.columns.weekday).values
xy_hour = pd.get_dummies(power_daily.columns.hour).values
xy_month = pd.get_dummies(power_daily.columns.month).values
xy_date = np.concatenate([xy_weekday, xy_hour, xy_month, holidays], 1)[drop_before:]
xy_date = np.repeat(np.expand_dims(xy_date, 0), xy_daily.shape[0], axis=0)
xy, xy_mean, xy_std = normalize(xy_15min[:, drop_before:], 1)
class ForwardSpliter:
|
spliter = ForwardSpliter()
train_idx, valid_idx = spliter.split(np.arange(xy.shape[1]), ENC_LEN, N_TEST + N_VALID)
valid_idx, test_idx = spliter.split(valid_idx, ENC_LEN, N_TEST)
train_xy = TimeSeries(xy[:, train_idx])
valid_xy = TimeSeries(xy[:, valid_idx])
trn_weight = TimeSeries(weights[:, train_idx])
val_weight = TimeSeries(weights[:, valid_idx])
trn_enc_cat = [Property(xy_cat)]
val_enc_cat = [Property(xy_cat)]
trn_dec_cat = [Property(xy_cat)]
val_dec_cat = [Property(xy_cat)]
trn_enc_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)]
val_enc_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)]
trn_dec_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)]
val_dec_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)]
train_frame = Seq2SeqDataLoader(train_xy, batch_size=8, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True,
mode='train', time_free_space=0, enc_num_feats=trn_enc_num,
enc_cat_feats=trn_enc_cat, dec_num_feats=trn_dec_num,
dec_cat_feats=trn_dec_cat,
weights=trn_weight, seq_last=False)
valid_frame = Seq2SeqDataLoader(valid_xy, batch_size=64, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True,
mode='valid', time_free_space=0,
time_interval=48,
enc_num_feats=val_enc_num,
enc_cat_feats=val_enc_cat,
dec_num_feats=val_dec_num,
dec_cat_feats=val_dec_cat,
seq_last=False)
model = RNN2RNN(96, hidden_size=256, compress_size=128, enc_num_size=40,
enc_cat_size=[(62, 4)], dec_num_size=40, dec_cat_size=[(62, 4)], residual=True,
beta1=.0, beta2=.0, attn_heads=1, attn_size=128, num_layers=1, dropout=0.0, rnn_type='GRU')
opt = Adam(model.parameters(), 0.001)
loss_fn = MSELoss()
model.cuda()
lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5)
learner = Learner(model, opt, './power_preds', verbose=20, lr_scheduler=lr_scheduler)
learner.fit(300, train_frame, valid_frame, patient=128, start_save=1, early_stopping=True)
learner.load(299)
learner.model.eval()
preds = []
trues = []
for batch in valid_frame:
batch[0].pop('dec_x')
preds.append(learner.model(**batch[0])[0])
trues.append(batch[1])
trues = torch.cat(trues, 2).squeeze().cpu().numpy() * xy_std + xy_mean
preds = torch.cat(preds, 2).squeeze().detach().cpu().numpy() * xy_std + xy_mean
k = 42
plt.plot(trues[k].reshape(-1))
plt.plot(preds[k].reshape(-1), label='preds')
plt.legend()
test_xy = torch.as_tensor(xy[:, test_idx]).float().cuda()
test_xy_num_feats = torch.as_tensor(
np.concatenate([xy_date[:, test_idx], np.repeat(np.expand_dims(xy_holiday_mean, 1), len(test_idx), 1),
xy_lags[:, test_idx], np.repeat(np.expand_dims(xy_daily_auto_corr, 1), len(test_idx), 1)],
axis=2)).float().cuda()
test_xy_cat_feats = torch.as_tensor(np.repeat(np.expand_dims(xy_cat, 1), test_xy.shape[1], 1)).long().cuda()
def plot(x_true, y_true, y_pred):
enc_ticks = np.arange(x_true.shape[1])
dec_ticks = np.arange(y_pred.shape[1]) + x_true.shape[1]
for idx in range(x_true.shape[0]):
plt.figure(figsize=(12, 3))
plt.plot(enc_ticks, x_true[idx])
plt.plot(dec_ticks, y_pred[idx], label='pred')
plt.plot(dec_ticks, y_true[idx], label='true')
plt.title(idx)
plt.legend()
def wmape(y_hat, y):
scores = []
for day in range(int(y.shape[0] / 24)):
scores.append(np.abs(y[day * 24: (day + 1) * 24] - y_hat[day * 24: (day + 1) * | def split(self, time_idx, enc_len, valid_size):
if valid_size < 1:
valid_size = int(np.floor(len(time_idx) * valid_size))
valid_idx = time_idx[-(valid_size + enc_len):]
train_idx = time_idx[:-valid_size]
return train_idx, valid_idx | identifier_body |
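# A minimal, self-contained illustration of the forward split defined above.
# This is an added sketch with assumed toy values; it is not part of the original script.
import numpy as np

def forward_split(time_idx, enc_len, valid_size):
    # keep the last `valid_size` steps for evaluation, plus `enc_len` steps of
    # history so the first evaluation window still has a full encoder context
    if valid_size < 1:
        valid_size = int(np.floor(len(time_idx) * valid_size))
    return time_idx[:-valid_size], time_idx[-(valid_size + enc_len):]

train_demo, valid_demo = forward_split(np.arange(100), enc_len=7, valid_size=30)
assert len(valid_demo) == 37 and train_demo[-1] == 69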
test_rnn2rnn_power.py | (x, axis, fill_zero=True):
mu = np.nanmean(x, axis, keepdims=True)
std = np.nanstd(x, axis, keepdims=True)
x_norm = (x - mu) / std
if fill_zero:
x_norm = np.nan_to_num(x_norm)
return x_norm, mu, std
power = pd.read_csv('./data/df.csv', parse_dates=['data_time'])[['data_time', 'cid', 'value']]
power_15min = power.set_index("data_time").groupby("cid").resample("15min").sum().reset_index()
power_15min = power_15min.pivot(index='cid', columns='data_time', values='value')
power_daily = power.set_index("data_time").groupby("cid").resample("1D").sum().reset_index()
power_daily = power_daily.pivot(index='cid', columns='data_time', values='value')
xy_15min = power_15min.values.reshape(62, -1, 4*24) # (62, 1082, 96)
xy_daily = power_daily.values
N_TEST = 30
N_VALID = 2
DEC_LEN = 2
ENC_LEN = 7
drop_before = 1000
starts, ends = F.get_valid_start_end(np.isnan(xy_daily))
corr_7 = F.batch_autocorr(xy_daily, 7, starts, ends, 1.05, use_smooth=False, smooth_offset=None)
corr_14 = F.batch_autocorr(xy_daily, 14, starts, ends, 1.05, use_smooth=False, smooth_offset=None)
corr_365 = F.batch_autocorr(xy_daily, 365, starts, ends, 1.05, use_smooth=True, smooth_offset=2)
xy_daily_auto_corr = np.concatenate([corr_7, corr_14, corr_365], 1)
xy_daily_auto_corr = normalize(xy_daily_auto_corr, 0)[0]
xy_lags = normalize(F.make_lags(xy_daily, [7, 14, 365])[:, :, drop_before:], axis=2)[0].transpose([0, 2, 1])
weights = (~np.bitwise_or(np.isnan(xy_15min), xy_15min == 0)).astype("float32")[:, drop_before:]
# weights[:, :, np.where((power.columns >= "2020-02-01") & (power.columns < "2020-03-01"), 1, 0)] = 0.01
# weights = weights * xy_mean / xy_mean.mean()
# weights = weights.transpose([0, 2, 1])
xy_cat = np.expand_dims(np.arange(len(weights)), 1)
def get_holiday_features(dts):
select_holidays = ["Spring Festival", "National Day", "Labour Day", "New Year's Day", "Mid-autumn Festival", "Tomb-sweeping Day"]
def _get_holidays(x):
is_holiday, holiday_name = calendar.get_holiday_detail(x)
if holiday_name in select_holidays and is_holiday:
return holiday_name
holidays = pd.get_dummies(pd.Series(dts).apply(lambda x: _get_holidays(x)))
holidays['sick'] = np.where((power_daily.columns >= "2020-02-01") & (power_daily.columns < "2020-03-01"), 1, 0)
holidays.index = dts
return holidays
def holiday_apply(x, holidays, func):
result = pd.DataFrame()
for h in holidays.columns:
result[h] = x.loc[:, holidays[h].values.astype(bool)].agg(func, axis=1).values
return result
holidays = get_holiday_features(power_daily.columns)
xy_holiday_mean = holiday_apply(power_daily, holidays, np.mean).values
xy_holiday_mean = normalize(xy_holiday_mean, 0)[0]
xy_weekday = pd.get_dummies(power_daily.columns.weekday).values
xy_hour = pd.get_dummies(power_daily.columns.hour).values
xy_month = pd.get_dummies(power_daily.columns.month).values
xy_date = np.concatenate([xy_weekday, xy_hour, xy_month, holidays], 1)[drop_before:]
xy_date = np.repeat(np.expand_dims(xy_date, 0), xy_daily.shape[0], axis=0)
xy, xy_mean, xy_std = normalize(xy_15min[:, drop_before:], 1)
class ForwardSpliter:
def split(self, time_idx, enc_len, valid_size):
if valid_size < 1:
valid_size = int(np.floor(len(time_idx) * valid_size))
valid_idx = time_idx[-(valid_size + enc_len):]
train_idx = time_idx[:-valid_size]
return train_idx, valid_idx
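# Note: valid_idx keeps an extra `enc_len` steps of history so the first decoder
# window still sees a full encoder context; train and validation therefore
# overlap by ENC_LEN time steps by design.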
spliter = ForwardSpliter()
train_idx, valid_idx = spliter.split(np.arange(xy.shape[1]), ENC_LEN, N_TEST + N_VALID)
valid_idx, test_idx = spliter.split(valid_idx, ENC_LEN, N_TEST)
train_xy = TimeSeries(xy[:, train_idx])
valid_xy = TimeSeries(xy[:, valid_idx])
trn_weight = TimeSeries(weights[:, train_idx])
val_weight = TimeSeries(weights[:, valid_idx])
trn_enc_cat = [Property(xy_cat)]
val_enc_cat = [Property(xy_cat)]
trn_dec_cat = [Property(xy_cat)]
val_dec_cat = [Property(xy_cat)]
trn_enc_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)]
val_enc_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)]
trn_dec_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)]
val_dec_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)]
train_frame = Seq2SeqDataLoader(train_xy, batch_size=8, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True,
mode='train', time_free_space=0, enc_num_feats=trn_enc_num,
enc_cat_feats=trn_enc_cat, dec_num_feats=trn_dec_num,
dec_cat_feats=trn_dec_cat,
weights=trn_weight, seq_last=False)
valid_frame = Seq2SeqDataLoader(valid_xy, batch_size=64, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True,
mode='valid', time_free_space=0,
time_interval=48,
enc_num_feats=val_enc_num,
enc_cat_feats=val_enc_cat,
dec_num_feats=val_dec_num,
dec_cat_feats=val_dec_cat,
seq_last=False)
model = RNN2RNN(96, hidden_size=256, compress_size=128, enc_num_size=40,
enc_cat_size=[(62, 4)], dec_num_size=40, dec_cat_size=[(62, 4)], residual=True,
beta1=.0, beta2=.0, attn_heads=1, attn_size=128, num_layers=1, dropout=0.0, rnn_type='GRU')
opt = Adam(model.parameters(), 0.001)
loss_fn = MSELoss()
model.cuda()
lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5)
learner = Learner(model, opt, './power_preds', verbose=20, lr_scheduler=lr_scheduler)
learner.fit(300, train_frame, valid_frame, patient=128, start_save=1, early_stopping=True)
learner.load(299)
learner.model.eval()
preds = []
trues = []
for batch in valid_frame:
batch[0].pop('dec_x')
preds.append(learner.model(**batch[0])[0])
trues.append(batch[1])
trues = torch.cat(trues, 2).squeeze().cpu().numpy() * xy_std + xy_mean
preds = torch.cat(preds, 2).squeeze().detach().cpu().numpy() * xy_std + xy_mean
k = 42
plt.plot(trues[k].reshape(-1))
plt.plot(preds[k].reshape(-1), label='preds')
plt.legend()
test_xy = torch.as_tensor(xy[:, test_idx]).float().cuda()
test_xy_num_feats = torch.as_tensor(
np.concatenate([xy_date[:, test_idx], np.repeat(np.expand_dims(xy_holiday_mean, 1), len(test_idx), 1),
xy_lags[:, test_idx], np.repeat(np.expand_dims(xy_daily_auto_corr, 1), len(test_idx), 1)],
axis=2)).float().cuda()
test_xy_cat_feats = torch.as_tensor(np.repeat(np.expand_dims(xy_cat, 1), test_xy.shape[1], 1)).long().cuda()
def plot(x_true, y_true, y_pred):
enc_ticks = np.arange(x_true.shape[1])
dec_ticks = np.arange(y_pred.shape[1]) + x_true.shape[1]
for idx in range(x_true.shape[0]):
plt.figure(figsize=(12, | normalize | identifier_name |
|
test_rnn2rnn_power.py | ample("15min").sum().reset_index()
power_15min = power_15min.pivot(index='cid', columns='data_time', values='value')
power_daily = power.set_index("data_time").groupby("cid").resample("1D").sum().reset_index()
power_daily = power_daily.pivot(index='cid', columns='data_time', values='value')
xy_15min = power_15min.values.reshape(62, -1, 4*24) # (62, 1082, 96)
xy_daily = power_daily.values
N_TEST = 30
N_VALID = 2
DEC_LEN = 2
ENC_LEN = 7
drop_before = 1000
starts, ends = F.get_valid_start_end(np.isnan(xy_daily))
corr_7 = F.batch_autocorr(xy_daily, 7, starts, ends, 1.05, use_smooth=False, smooth_offset=None)
corr_14 = F.batch_autocorr(xy_daily, 14, starts, ends, 1.05, use_smooth=False, smooth_offset=None)
corr_365 = F.batch_autocorr(xy_daily, 365, starts, ends, 1.05, use_smooth=True, smooth_offset=2)
xy_daily_auto_corr = np.concatenate([corr_7, corr_14, corr_365], 1)
xy_daily_auto_corr = normalize(xy_daily_auto_corr, 0)[0]
xy_lags = normalize(F.make_lags(xy_daily, [7, 14, 365])[:, :, drop_before:], axis=2)[0].transpose([0, 2, 1])
weights = (~np.bitwise_or(np.isnan(xy_15min), xy_15min == 0)).astype("float32")[:, drop_before:]
# weights[:, :, np.where((power.columns >= "2020-02-01") & (power.columns < "2020-03-01"), 1, 0)] = 0.01
# weights = weights * xy_mean / xy_mean.mean()
# weights = weights.transpose([0, 2, 1])
xy_cat = np.expand_dims(np.arange(len(weights)), 1)
def get_holiday_features(dts):
select_holidays = ["Spring Festival", "National Day", "Labour Day", "New Year's Day", "Mid-autumn Festival", "Tomb-sweeping Day"]
def _get_holidays(x):
is_holiday, holiday_name = calendar.get_holiday_detail(x)
if holiday_name in select_holidays and is_holiday:
return holiday_name
holidays = pd.get_dummies(pd.Series(dts).apply(lambda x: _get_holidays(x)))
holidays['sick'] = np.where((power_daily.columns >= "2020-02-01") & (power_daily.columns < "2020-03-01"), 1, 0)
holidays.index = dts
return holidays
def holiday_apply(x, holidays, func):
result = pd.DataFrame()
for h in holidays.columns:
result[h] = x.loc[:, holidays[h].values.astype(bool)].agg(func, axis=1).values
return result
holidays = get_holiday_features(power_daily.columns)
xy_holiday_mean = holiday_apply(power_daily, holidays, np.mean).values
xy_holiday_mean = normalize(xy_holiday_mean, 0)[0]
xy_weekday = pd.get_dummies(power_daily.columns.weekday).values
xy_hour = pd.get_dummies(power_daily.columns.hour).values
xy_month = pd.get_dummies(power_daily.columns.month).values
xy_date = np.concatenate([xy_weekday, xy_hour, xy_month, holidays], 1)[drop_before:]
xy_date = np.repeat(np.expand_dims(xy_date, 0), xy_daily.shape[0], axis=0)
xy, xy_mean, xy_std = normalize(xy_15min[:, drop_before:], 1)
class ForwardSpliter:
def split(self, time_idx, enc_len, valid_size):
if valid_size < 1:
valid_size = int(np.floor(len(time_idx) * valid_size))
valid_idx = time_idx[-(valid_size + enc_len):]
train_idx = time_idx[:-valid_size]
return train_idx, valid_idx
spliter = ForwardSpliter()
train_idx, valid_idx = spliter.split(np.arange(xy.shape[1]), ENC_LEN, N_TEST + N_VALID)
valid_idx, test_idx = spliter.split(valid_idx, ENC_LEN, N_TEST)
train_xy = TimeSeries(xy[:, train_idx])
valid_xy = TimeSeries(xy[:, valid_idx])
trn_weight = TimeSeries(weights[:, train_idx])
val_weight = TimeSeries(weights[:, valid_idx])
trn_enc_cat = [Property(xy_cat)]
val_enc_cat = [Property(xy_cat)]
trn_dec_cat = [Property(xy_cat)]
val_dec_cat = [Property(xy_cat)]
trn_enc_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)]
val_enc_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)]
trn_dec_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)]
val_dec_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)]
train_frame = Seq2SeqDataLoader(train_xy, batch_size=8, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True, | valid_frame = Seq2SeqDataLoader(valid_xy, batch_size=64, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True,
mode='valid', time_free_space=0,
time_interval=48,
enc_num_feats=val_enc_num,
enc_cat_feats=val_enc_cat,
dec_num_feats=val_dec_num,
dec_cat_feats=val_dec_cat,
seq_last=False)
model = RNN2RNN(96, hidden_size=256, compress_size=128, enc_num_size=40,
enc_cat_size=[(62, 4)], dec_num_size=40, dec_cat_size=[(62, 4)], residual=True,
beta1=.0, beta2=.0, attn_heads=1, attn_size=128, num_layers=1, dropout=0.0, rnn_type='GRU')
opt = Adam(model.parameters(), 0.001)
loss_fn = MSELoss()
model.cuda()
lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5)
learner = Learner(model, opt, './power_preds', verbose=20, lr_scheduler=lr_scheduler)
learner.fit(300, train_frame, valid_frame, patient=128, start_save=1, early_stopping=True)
learner.load(299)
learner.model.eval()
preds = []
trues = []
for batch in valid_frame:
batch[0].pop('dec_x')
preds.append(learner.model(**batch[0])[0])
trues.append(batch[1])
trues = torch.cat(trues, 2).squeeze().cpu().numpy() * xy_std + xy_mean
preds = torch.cat(preds, 2).squeeze().detach().cpu().numpy() * xy_std + xy_mean
k = 42
plt.plot(trues[k].reshape(-1))
plt.plot(preds[k].reshape(-1), label='preds')
plt.legend()
test_xy = torch.as_tensor(xy[:, test_idx]).float().cuda()
test_xy_num_feats = torch.as_tensor(
np.concatenate([xy_date[:, test_idx], np.repeat(np.expand_dims(xy_holiday_mean, 1), len(test_idx), 1),
xy_lags[:, test_idx], np.repeat(np.expand_dims(xy_daily_auto_corr, 1), len(test_idx), 1)],
axis=2)).float().cuda()
test_xy_cat_feats = torch.as_tensor(np.repeat(np.expand_dims(xy_cat, 1), test_xy.shape[1], 1)).long().cuda()
def plot(x_true, y_true, y_pred):
enc_ticks = np.arange(x_true.shape[1])
dec_ticks = np.arange(y_pred.shape[1]) + x_true.shape[1]
for idx in range(x_true.shape[0]):
plt.figure(figsize=(12, 3))
plt.plot(enc_ticks, x_true[idx])
plt.plot(dec_ticks, y_pred[idx], label='pred')
plt.plot(dec_ticks, y_true[idx], label='true')
plt.title(idx)
plt.legend()
def wmape(y_hat, y):
scores = []
for day in range(int(y.shape[0] / 24)):
scores.append(np.abs(y[day * 24: (day + 1) * 24] - y_hat[day * 24: (day + 1) * | mode='train', time_free_space=0, enc_num_feats=trn_enc_num,
enc_cat_feats=trn_enc_cat, dec_num_feats=trn_dec_num,
dec_cat_feats=trn_dec_cat,
weights=trn_weight, seq_last=False) | random_line_split |
test_rnn2rnn_power.py | ample("15min").sum().reset_index()
power_15min = power_15min.pivot(index='cid', columns='data_time', values='value')
power_daily = power.set_index("data_time").groupby("cid").resample("1D").sum().reset_index()
power_daily = power_daily.pivot(index='cid', columns='data_time', values='value')
xy_15min = power_15min.values.reshape(62, -1, 4*24) # (62, 1082, 96)
xy_daily = power_daily.values
N_TEST = 30
N_VALID = 2
DEC_LEN = 2
ENC_LEN = 7
drop_before = 1000
starts, ends = F.get_valid_start_end(np.isnan(xy_daily))
corr_7 = F.batch_autocorr(xy_daily, 7, starts, ends, 1.05, use_smooth=False, smooth_offset=None)
corr_14 = F.batch_autocorr(xy_daily, 14, starts, ends, 1.05, use_smooth=False, smooth_offset=None)
corr_365 = F.batch_autocorr(xy_daily, 365, starts, ends, 1.05, use_smooth=True, smooth_offset=2)
xy_daily_auto_corr = np.concatenate([corr_7, corr_14, corr_365], 1)
xy_daily_auto_corr = normalize(xy_daily_auto_corr, 0)[0]
xy_lags = normalize(F.make_lags(xy_daily, [7, 14, 365])[:, :, drop_before:], axis=2)[0].transpose([0, 2, 1])
weights = (~np.bitwise_or(np.isnan(xy_15min), xy_15min == 0)).astype("float32")[:, drop_before:]
# weights[:, :, np.where((power.columns >= "2020-02-01") & (power.columns < "2020-03-01"), 1, 0)] = 0.01
# weights = weights * xy_mean / xy_mean.mean()
# weights = weights.transpose([0, 2, 1])
xy_cat = np.expand_dims(np.arange(len(weights)), 1)
def get_holiday_features(dts):
select_holidays = ["Spring Festival", "National Day", "Labour Day", "New Year's Day", "Mid-autumn Festival", "Tomb-sweeping Day"]
def _get_holidays(x):
is_holiday, holiday_name = calendar.get_holiday_detail(x)
if holiday_name in select_holidays and is_holiday:
return holiday_name
holidays = pd.get_dummies(pd.Series(dts).apply(lambda x: _get_holidays(x)))
holidays['sick'] = np.where((power_daily.columns >= "2020-02-01") & (power_daily.columns < "2020-03-01"), 1, 0)
holidays.index = dts
return holidays
def holiday_apply(x, holidays, func):
result = pd.DataFrame()
for h in holidays.columns:
|
return result
holidays = get_holiday_features(power_daily.columns)
xy_holiday_mean = holiday_apply(power_daily, holidays, np.mean).values
xy_holiday_mean = normalize(xy_holiday_mean, 0)[0]
xy_weekday = pd.get_dummies(power_daily.columns.weekday).values
xy_hour = pd.get_dummies(power_daily.columns.hour).values
xy_month = pd.get_dummies(power_daily.columns.month).values
xy_date = np.concatenate([xy_weekday, xy_hour, xy_month, holidays], 1)[drop_before:]
xy_date = np.repeat(np.expand_dims(xy_date, 0), xy_daily.shape[0], axis=0)
xy, xy_mean, xy_std = normalize(xy_15min[:, drop_before:], 1)
class ForwardSpliter:
def split(self, time_idx, enc_len, valid_size):
if valid_size < 1:
valid_size = int(np.floor(len(time_idx) * valid_size))
valid_idx = time_idx[-(valid_size + enc_len):]
train_idx = time_idx[:-valid_size]
return train_idx, valid_idx
spliter = ForwardSpliter()
train_idx, valid_idx = spliter.split(np.arange(xy.shape[1]), ENC_LEN, N_TEST + N_VALID)
valid_idx, test_idx = spliter.split(valid_idx, ENC_LEN, N_TEST)
train_xy = TimeSeries(xy[:, train_idx])
valid_xy = TimeSeries(xy[:, valid_idx])
trn_weight = TimeSeries(weights[:, train_idx])
val_weight = TimeSeries(weights[:, valid_idx])
trn_enc_cat = [Property(xy_cat)]
val_enc_cat = [Property(xy_cat)]
trn_dec_cat = [Property(xy_cat)]
val_dec_cat = [Property(xy_cat)]
trn_enc_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)]
val_enc_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)]
trn_dec_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)]
val_dec_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean),
TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)]
train_frame = Seq2SeqDataLoader(train_xy, batch_size=8, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True,
mode='train', time_free_space=0, enc_num_feats=trn_enc_num,
enc_cat_feats=trn_enc_cat, dec_num_feats=trn_dec_num,
dec_cat_feats=trn_dec_cat,
weights=trn_weight, seq_last=False)
valid_frame = Seq2SeqDataLoader(valid_xy, batch_size=64, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True,
mode='valid', time_free_space=0,
time_interval=48,
enc_num_feats=val_enc_num,
enc_cat_feats=val_enc_cat,
dec_num_feats=val_dec_num,
dec_cat_feats=val_dec_cat,
seq_last=False)
model = RNN2RNN(96, hidden_size=256, compress_size=128, enc_num_size=40,
enc_cat_size=[(62, 4)], dec_num_size=40, dec_cat_size=[(62, 4)], residual=True,
beta1=.0, beta2=.0, attn_heads=1, attn_size=128, num_layers=1, dropout=0.0, rnn_type='GRU')
opt = Adam(model.parameters(), 0.001)
loss_fn = MSELoss()
model.cuda()
lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5)
learner = Learner(model, opt, './power_preds', verbose=20, lr_scheduler=lr_scheduler)
learner.fit(300, train_frame, valid_frame, patient=128, start_save=1, early_stopping=True)
learner.load(299)
learner.model.eval()
preds = []
trues = []
for batch in valid_frame:
batch[0].pop('dec_x')
preds.append(learner.model(**batch[0])[0])
trues.append(batch[1])
trues = torch.cat(trues, 2).squeeze().cpu().numpy() * xy_std + xy_mean
preds = torch.cat(preds, 2).squeeze().detach().cpu().numpy() * xy_std + xy_mean
k = 42
plt.plot(trues[k].reshape(-1))
plt.plot(preds[k].reshape(-1), label='preds')
plt.legend()
test_xy = torch.as_tensor(xy[:, test_idx]).float().cuda()
test_xy_num_feats = torch.as_tensor(
np.concatenate([xy_date[:, test_idx], np.repeat(np.expand_dims(xy_holiday_mean, 1), len(test_idx), 1),
xy_lags[:, test_idx], np.repeat(np.expand_dims(xy_daily_auto_corr, 1), len(test_idx), 1)],
axis=2)).float().cuda()
test_xy_cat_feats = torch.as_tensor(np.repeat(np.expand_dims(xy_cat, 1), test_xy.shape[1], 1)).long().cuda()
def plot(x_true, y_true, y_pred):
enc_ticks = np.arange(x_true.shape[1])
dec_ticks = np.arange(y_pred.shape[1]) + x_true.shape[1]
for idx in range(x_true.shape[0]):
plt.figure(figsize=(12, 3))
plt.plot(enc_ticks, x_true[idx])
plt.plot(dec_ticks, y_pred[idx], label='pred')
plt.plot(dec_ticks, y_true[idx], label='true')
plt.title(idx)
plt.legend()
def wmape(y_hat, y):
scores = []
for day in range(int(y.shape[0] / 24)):
scores.append(np.abs(y[day * 24: (day + 1) * 24] - y_hat[day * 24: (day + 1) * | result[h] = x.loc[:, holidays[h].values.astype(bool)].agg(func, axis=1).values | conditional_block |
authconn_internal.py | by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: [email protected] or [email protected]
##
"""
AuthconnInternal implements the connector for
OSM Internal Authentication Backend and leverages the RBAC model
"""
__author__ = "Pedro de la Cruz Ramos <[email protected]>"
__date__ = "$06-jun-2019 11:16:08$"
from authconn import Authconn, AuthException
from osm_common.dbbase import DbException
from base_topic import BaseTopic
import logging
from time import time
from http import HTTPStatus
from uuid import uuid4
from hashlib import sha256
from copy import deepcopy
from random import choice as random_choice
class AuthconnInternal(Authconn):
def __init__(self, config, db, token_cache):
Authconn.__init__(self, config)
self.logger = logging.getLogger("nbi.authenticator.internal")
# Get Configuration
# self.xxx = config.get("xxx", "default")
self.db = db
self.token_cache = token_cache
# To be Confirmed
self.auth = None
self.sess = None
# def create_token (self, user, password, projects=[], project=None, remote=None):
# Not Required
# def authenticate_with_user_password(self, user, password, project=None, remote=None):
# Not Required
# def authenticate_with_token(self, token, project=None, remote=None):
# Not Required
# def get_user_project_list(self, token):
# Not Required
# def get_user_role_list(self, token):
# Not Required
# def create_user(self, user, password):
# Not Required
# def change_password(self, user, new_password):
# Not Required
# def delete_user(self, user_id):
# Not Required
# def get_user_list(self, filter_q={}):
# Not Required
# def get_project_list(self, filter_q={}):
# Not required
# def create_project(self, project):
# Not required
# def delete_project(self, project_id):
# Not required
# def assign_role_to_user(self, user, project, role):
# Not required in Phase 1
# def remove_role_from_user(self, user, project, role):
# Not required in Phase 1
def validate_token(self, token):
"""
Check if the token is valid.
:param token: token to validate
:return: dictionary with information associated with the token:
"_id": token id
"project_id": project id
"project_name": project name
"user_id": user id
"username": user name
"roles": list with dict containing {name, id}
"expires": expiration date
If the token is not valid an exception is raised.
"""
try:
if not token:
raise AuthException("Needed a token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
# try to get from cache first
now = time()
token_info = self.token_cache.get(token)
if token_info and token_info["expires"] < now:
# delete token. MUST be done with care, as another thread may have already deleted it. Do not use del
self.token_cache.pop(token, None)
token_info = None
# get from database if not in cache
if not token_info:
token_info = self.db.get_one("tokens", {"_id": token})
if token_info["expires"] < now:
raise AuthException("Expired Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
return token_info
except DbException as e:
if e.http_code == HTTPStatus.NOT_FOUND:
raise AuthException("Invalid Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
else:
raise
except AuthException:
if self.config["global"].get("test.user_not_authorized"):
return {"id": "fake-token-id-for-test",
"project_id": self.config["global"].get("test.project_not_authorized", "admin"),
"username": self.config["global"]["test.user_not_authorized"], "admin": True}
else:
raise
except Exception:
self.logger.exception("Error during token validation using internal backend")
raise AuthException("Error during token validation using internal backend",
http_code=HTTPStatus.UNAUTHORIZED)
def revoke_token(self, token):
"""
Invalidate a token.
:param token: token to be revoked
"""
try:
self.token_cache.pop(token, None)
self.db.del_one("tokens", {"_id": token})
return True
except DbException as e:
if e.http_code == HTTPStatus.NOT_FOUND:
raise AuthException("Token '{}' not found".format(token), http_code=HTTPStatus.NOT_FOUND)
else:
# raise
msg = "Error during token revocation using internal backend"
self.logger.exception(msg)
raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)
def authenticate(self, user, password, project=None, token_info=None):
"""
Authenticate a user using username/password or previous token_info plus project; it creates a new token
:param user: user: name, id or None
:param password: password or None
:param project: name, id, or None. If None first found project will be used to get an scope token
:param token_info: previous token_info to obtain authorization
:param remote: remote host information
:return: the scoped token info or raises an exception. The token is a dictionary with:
_id: token string id,
username: username,
project_id: scoped_token project_id,
project_name: scoped_token project_name,
expires: epoch time when it expires,
"""
now = time()
user_content = None
try:
# Try using username/password
if user:
user_rows = self.db.get_list("users", {BaseTopic.id_field("users", user): user})
if user_rows:
user_content = user_rows[0]
salt = user_content["_admin"]["salt"]
shadow_password = sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
if shadow_password != user_content["password"]:
user_content = None
if not user_content:
raise AuthException("Invalid username/password", http_code=HTTPStatus.UNAUTHORIZED)
elif token_info:
user_rows = self.db.get_list("users", {"username": token_info["username"]})
if user_rows:
user_content = user_rows[0]
else:
raise AuthException("Invalid token", http_code=HTTPStatus.UNAUTHORIZED)
else:
raise AuthException("Provide credentials: username/password or Authorization Bearer token",
http_code=HTTPStatus.UNAUTHORIZED)
token_id = ''.join(random_choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
for _ in range(0, 32))
# TODO when user contained project_role_mappings with project_id,project_ name this checking to | proj = self.db.get_one("projects", {BaseTopic.id_field("projects", project): project})
if proj["_id"] not in user_content["projects"] and proj["name"] not in user_content["projects"]:
raise AuthException("project {} not allowed for this user".format(project),
http_code=HTTPStatus.UNAUTHORIZED)
# TODO remove admin, this will be used by roles RBAC
if proj["name"] == "admin":
token_admin = True
else:
token_admin = proj.get("admin", False)
# TODO add token roles - PROVISIONAL. Get this list from user_content["project_role_mappings"]
role_id = self.db.get_one("roles", {"name": "system_admin"})["_id"]
roles_list = [{"name": "system_admin", "id": role_id}]
new_token = {"issued_at": now,
"expires": now + 3600,
"_id": token_id,
"id": token_id,
"project_id": proj["_id"],
"project_name": proj["name"],
"username": user_content["username"],
"user_id": user_content["_id"],
"admin": token_admin,
"roles": roles_list,
}
self.token_cache[token_id] = new_token
self.db.create("tokens", new_token)
return deepcopy(new_token)
except Exception as e:
msg = "Error during user authentication using internal backend: {}".format(e)
self.logger.exception(msg)
raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)
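# Usage sketch (object names are illustrative, not part of this module):
#   backend = AuthconnInternal(config, db, token_cache)
#   token_info = backend.authenticate("admin", "admin-password", project="admin")
#   backend.validate_token(token_info["_id"])
#   backend.revoke_token(token_info["_id"])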
def get_role_list(self):
"""
Get role list.
:return: returns the list of roles.
"""
try:
role_list = self.db.get_list("roles")
roles = [{"name": role["name"], "_id": | # database will not be needed
if not project:
project = user_content["projects"][0]
# To allow project names in project_id | random_line_split |
authconn_internal.py | by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: [email protected] or [email protected]
##
"""
AuthconnInternal implements the connector for
OSM Internal Authentication Backend and leverages the RBAC model
"""
__author__ = "Pedro de la Cruz Ramos <[email protected]>"
__date__ = "$06-jun-2019 11:16:08$"
from authconn import Authconn, AuthException
from osm_common.dbbase import DbException
from base_topic import BaseTopic
import logging
from time import time
from http import HTTPStatus
from uuid import uuid4
from hashlib import sha256
from copy import deepcopy
from random import choice as random_choice
class AuthconnInternal(Authconn):
def _ | self, config, db, token_cache):
Authconn.__init__(self, config)
self.logger = logging.getLogger("nbi.authenticator.internal")
# Get Configuration
# self.xxx = config.get("xxx", "default")
self.db = db
self.token_cache = token_cache
# To be Confirmed
self.auth = None
self.sess = None
# def create_token (self, user, password, projects=[], project=None, remote=None):
# Not Required
# def authenticate_with_user_password(self, user, password, project=None, remote=None):
# Not Required
# def authenticate_with_token(self, token, project=None, remote=None):
# Not Required
# def get_user_project_list(self, token):
# Not Required
# def get_user_role_list(self, token):
# Not Required
# def create_user(self, user, password):
# Not Required
# def change_password(self, user, new_password):
# Not Required
# def delete_user(self, user_id):
# Not Required
# def get_user_list(self, filter_q={}):
# Not Required
# def get_project_list(self, filter_q={}):
# Not required
# def create_project(self, project):
# Not required
# def delete_project(self, project_id):
# Not required
# def assign_role_to_user(self, user, project, role):
# Not required in Phase 1
# def remove_role_from_user(self, user, project, role):
# Not required in Phase 1
def validate_token(self, token):
"""
Check if the token is valid.
:param token: token to validate
:return: dictionary with information associated with the token:
"_id": token id
"project_id": project id
"project_name": project name
"user_id": user id
"username": user name
"roles": list with dict containing {name, id}
"expires": expiration date
If the token is not valid an exception is raised.
"""
try:
if not token:
raise AuthException("Needed a token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
# try to get from cache first
now = time()
token_info = self.token_cache.get(token)
if token_info and token_info["expires"] < now:
# delete token. MUST be done with care, as another thread may have already deleted it. Do not use del
self.token_cache.pop(token, None)
token_info = None
# get from database if not in cache
if not token_info:
token_info = self.db.get_one("tokens", {"_id": token})
if token_info["expires"] < now:
raise AuthException("Expired Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
return token_info
except DbException as e:
if e.http_code == HTTPStatus.NOT_FOUND:
raise AuthException("Invalid Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
else:
raise
except AuthException:
if self.config["global"].get("test.user_not_authorized"):
return {"id": "fake-token-id-for-test",
"project_id": self.config["global"].get("test.project_not_authorized", "admin"),
"username": self.config["global"]["test.user_not_authorized"], "admin": True}
else:
raise
except Exception:
self.logger.exception("Error during token validation using internal backend")
raise AuthException("Error during token validation using internal backend",
http_code=HTTPStatus.UNAUTHORIZED)
def revoke_token(self, token):
"""
Invalidate a token.
:param token: token to be revoked
"""
try:
self.token_cache.pop(token, None)
self.db.del_one("tokens", {"_id": token})
return True
except DbException as e:
if e.http_code == HTTPStatus.NOT_FOUND:
raise AuthException("Token '{}' not found".format(token), http_code=HTTPStatus.NOT_FOUND)
else:
# raise
msg = "Error during token revocation using internal backend"
self.logger.exception(msg)
raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)
def authenticate(self, user, password, project=None, token_info=None):
"""
Authenticate a user using username/password or previous token_info plus project; it creates a new token
:param user: user: name, id or None
:param password: password or None
:param project: name, id, or None. If None first found project will be used to get an scope token
:param token_info: previous token_info to obtain authorization
:param remote: remote host information
:return: the scoped token info or raises an exception. The token is a dictionary with:
_id: token string id,
username: username,
project_id: scoped_token project_id,
project_name: scoped_token project_name,
expires: epoch time when it expires,
"""
now = time()
user_content = None
try:
# Try using username/password
if user:
user_rows = self.db.get_list("users", {BaseTopic.id_field("users", user): user})
if user_rows:
user_content = user_rows[0]
salt = user_content["_admin"]["salt"]
shadow_password = sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
if shadow_password != user_content["password"]:
user_content = None
if not user_content:
raise AuthException("Invalid username/password", http_code=HTTPStatus.UNAUTHORIZED)
elif token_info:
user_rows = self.db.get_list("users", {"username": token_info["username"]})
if user_rows:
user_content = user_rows[0]
else:
raise AuthException("Invalid token", http_code=HTTPStatus.UNAUTHORIZED)
else:
raise AuthException("Provide credentials: username/password or Authorization Bearer token",
http_code=HTTPStatus.UNAUTHORIZED)
token_id = ''.join(random_choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
for _ in range(0, 32))
# TODO when user contained project_role_mappings with project_id, project_name this checking to
# database will not be needed
if not project:
project = user_content["projects"][0]
# To allow project names in project_id
proj = self.db.get_one("projects", {BaseTopic.id_field("projects", project): project})
if proj["_id"] not in user_content["projects"] and proj["name"] not in user_content["projects"]:
raise AuthException("project {} not allowed for this user".format(project),
http_code=HTTPStatus.UNAUTHORIZED)
# TODO remove admin, this will be used by roles RBAC
if proj["name"] == "admin":
token_admin = True
else:
token_admin = proj.get("admin", False)
# TODO add token roles - PROVISIONAL. Get this list from user_content["project_role_mappings"]
role_id = self.db.get_one("roles", {"name": "system_admin"})["_id"]
roles_list = [{"name": "system_admin", "id": role_id}]
new_token = {"issued_at": now,
"expires": now + 3600,
"_id": token_id,
"id": token_id,
"project_id": proj["_id"],
"project_name": proj["name"],
"username": user_content["username"],
"user_id": user_content["_id"],
"admin": token_admin,
"roles": roles_list,
}
self.token_cache[token_id] = new_token
self.db.create("tokens", new_token)
return deepcopy(new_token)
except Exception as e:
msg = "Error during user authentication using internal backend: {}".format(e)
self.logger.exception(msg)
raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)
def get_role_list(self):
"""
Get role list.
:return: returns the list of roles.
"""
try:
role_list = self.db.get_list("roles")
roles = [{"name": role["name"], "_ | _init__( | identifier_name |
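# A minimal, self-contained sketch of the salted-hash password check performed by
# authenticate() above: the supplied password is concatenated with the per-user salt,
# hashed with SHA-256, and compared against the stored digest. The helper name and
# the example record below are made up for illustration; the record layout follows
# the snippet, not necessarily the full OSM schema.
from hashlib import sha256

def check_password(supplied_password, user_record):
    salt = user_record["_admin"]["salt"]
    shadow = sha256(supplied_password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
    return shadow == user_record["password"]

if __name__ == "__main__":
    stored = {"_admin": {"salt": "abc123"},
              "password": sha256(b"secret" + b"abc123").hexdigest()}
    assert check_password("secret", stored)
    assert not check_password("wrong", stored)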
authconn_internal.py | applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: [email protected] or [email protected]
##
"""
AuthconnInternal implements the connector for
OSM Internal Authentication Backend and leverages the RBAC model
"""
__author__ = "Pedro de la Cruz Ramos <[email protected]>"
__date__ = "$06-jun-2019 11:16:08$"
from authconn import Authconn, AuthException
from osm_common.dbbase import DbException
from base_topic import BaseTopic
import logging
from time import time
from http import HTTPStatus
from uuid import uuid4
from hashlib import sha256
from copy import deepcopy
from random import choice as random_choice
class AuthconnInternal(Authconn):
def __init__(self, config, db, token_cache):
Authconn.__init__(self, config)
self.logger = logging.getLogger("nbi.authenticator.internal")
# Get Configuration
# self.xxx = config.get("xxx", "default")
self.db = db
self.token_cache = token_cache
# To be Confirmed
self.auth = None
self.sess = None
# def create_token (self, user, password, projects=[], project=None, remote=None):
# Not Required
# def authenticate_with_user_password(self, user, password, project=None, remote=None):
# Not Required
# def authenticate_with_token(self, token, project=None, remote=None):
# Not Required
# def get_user_project_list(self, token):
# Not Required
# def get_user_role_list(self, token):
# Not Required
# def create_user(self, user, password):
# Not Required
# def change_password(self, user, new_password):
# Not Required
# def delete_user(self, user_id):
# Not Required
# def get_user_list(self, filter_q={}):
# Not Required
# def get_project_list(self, filter_q={}):
# Not required
# def create_project(self, project):
# Not required
# def delete_project(self, project_id):
# Not required
# def assign_role_to_user(self, user, project, role):
# Not required in Phase 1
# def remove_role_from_user(self, user, project, role):
# Not required in Phase 1
def validate_token(self, token):
"""
Check if the token is valid.
:param token: token to validate
:return: dictionary with information associated with the token:
"_id": token id
"project_id": project id
"project_name": project name
"user_id": user id
"username": user name
"roles": list with dict containing {name, id}
"expires": expiration date
If the token is not valid an exception is raised.
"""
try:
if not token:
raise AuthException("Needed a token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
# try to get from cache first
now = time()
token_info = self.token_cache.get(token)
if token_info and token_info["expires"] < now:
                # delete token. MUST be done with care, as another thread may have already deleted it. Do not use del
self.token_cache.pop(token, None)
token_info = None
# get from database if not in cache
if not token_info:
token_info = self.db.get_one("tokens", {"_id": token})
if token_info["expires"] < now:
raise AuthException("Expired Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
return token_info
except DbException as e:
if e.http_code == HTTPStatus.NOT_FOUND:
raise AuthException("Invalid Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
else:
raise
except AuthException:
if self.config["global"].get("test.user_not_authorized"):
return {"id": "fake-token-id-for-test",
"project_id": self.config["global"].get("test.project_not_authorized", "admin"),
"username": self.config["global"]["test.user_not_authorized"], "admin": True}
else:
raise
except Exception:
self.logger.exception("Error during token validation using internal backend")
raise AuthException("Error during token validation using internal backend",
http_code=HTTPStatus.UNAUTHORIZED)
def revoke_token(self, token):
" |
def authenticate(self, user, password, project=None, token_info=None):
"""
        Authenticate a user using username/password or a previous token_info plus project; it creates a new token
:param user: user: name, id or None
:param password: password or None
        :param project: name, id, or None. If None, the first project found will be used to get a scoped token
:param token_info: previous token_info to obtain authorization
:param remote: remote host information
:return: the scoped token info or raises an exception. The token is a dictionary with:
_id: token string id,
username: username,
project_id: scoped_token project_id,
project_name: scoped_token project_name,
expires: epoch time when it expires,
"""
now = time()
user_content = None
try:
# Try using username/password
if user:
user_rows = self.db.get_list("users", {BaseTopic.id_field("users", user): user})
if user_rows:
user_content = user_rows[0]
salt = user_content["_admin"]["salt"]
shadow_password = sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
if shadow_password != user_content["password"]:
user_content = None
if not user_content:
raise AuthException("Invalid username/password", http_code=HTTPStatus.UNAUTHORIZED)
elif token_info:
user_rows = self.db.get_list("users", {"username": token_info["username"]})
if user_rows:
user_content = user_rows[0]
else:
raise AuthException("Invalid token", http_code=HTTPStatus.UNAUTHORIZED)
else:
raise AuthException("Provide credentials: username/password or Authorization Bearer token",
http_code=HTTPStatus.UNAUTHORIZED)
token_id = ''.join(random_choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
for _ in range(0, 32))
            # TODO when the user contains project_role_mappings with project_id, project_name, this check against
            # the database will not be needed
if not project:
project = user_content["projects"][0]
# To allow project names in project_id
proj = self.db.get_one("projects", {BaseTopic.id_field("projects", project): project})
if proj["_id"] not in user_content["projects"] and proj["name"] not in user_content["projects"]:
raise AuthException("project {} not allowed for this user".format(project),
http_code=HTTPStatus.UNAUTHORIZED)
            # TODO remove admin, this will be used by roles RBAC
if proj["name"] == "admin":
token_admin = True
else:
token_admin = proj.get("admin", False)
# TODO add token roles - PROVISIONAL. Get this list from user_content["project_role_mappings"]
role_id = self.db.get_one("roles", {"name": "system_admin"})["_id"]
roles_list = [{"name": "system_admin", "id": role_id}]
new_token = {"issued_at": now,
"expires": now + 3600,
"_id": token_id,
"id": token_id,
"project_id": proj["_id"],
"project_name": proj["name"],
"username": user_content["username"],
"user_id": user_content["_id"],
"admin": token_admin,
"roles": roles_list,
}
self.token_cache[token_id] = new_token
self.db.create("tokens", new_token)
return deepcopy(new_token)
except Exception as e:
msg = "Error during user authentication using internal backend: {}".format(e)
self.logger.exception(msg)
raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)
def get_role_list(self):
"""
Get role list.
:return: returns the list of roles.
"""
try:
role_list = self.db.get_list("roles")
roles = [{"name": role["name"], | ""
Invalidate a token.
:param token: token to be revoked
"""
try:
self.token_cache.pop(token, None)
self.db.del_one("tokens", {"_id": token})
return True
except DbException as e:
if e.http_code == HTTPStatus.NOT_FOUND:
raise AuthException("Token '{}' not found".format(token), http_code=HTTPStatus.NOT_FOUND)
else:
# raise
msg = "Error during token revocation using internal backend"
self.logger.exception(msg)
raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)
| identifier_body |
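# A minimal standalone model of the token lookup flow used by validate_token() above:
# consult the in-memory cache first, evict an expired entry, then fall back to
# persistent storage. The function name and the plain-dict "db" below are illustrative
# stand-ins, not the real database API.
from time import time

def lookup_token(token, cache, db):
    now = time()
    info = cache.get(token)
    if info and info["expires"] < now:
        # expired: drop it from the cache; another thread may race us, so pop() is used
        cache.pop(token, None)
        info = None
    if not info:
        info = db.get(token)
    if not info or info["expires"] < now:
        raise ValueError("invalid or expired token")
    return info

if __name__ == "__main__":
    db = {"tok1": {"expires": time() + 3600, "username": "alice"}}
    assert lookup_token("tok1", {}, db)["username"] == "alice"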
authconn_internal.py | applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: [email protected] or [email protected]
##
"""
AuthconnInternal implements the connector for
OSM Internal Authentication Backend and leverages the RBAC model
"""
__author__ = "Pedro de la Cruz Ramos <[email protected]>"
__date__ = "$06-jun-2019 11:16:08$"
from authconn import Authconn, AuthException
from osm_common.dbbase import DbException
from base_topic import BaseTopic
import logging
from time import time
from http import HTTPStatus
from uuid import uuid4
from hashlib import sha256
from copy import deepcopy
from random import choice as random_choice
class AuthconnInternal(Authconn):
def __init__(self, config, db, token_cache):
Authconn.__init__(self, config)
self.logger = logging.getLogger("nbi.authenticator.internal")
# Get Configuration
# self.xxx = config.get("xxx", "default")
self.db = db
self.token_cache = token_cache
# To be Confirmed
self.auth = None
self.sess = None
# def create_token (self, user, password, projects=[], project=None, remote=None):
# Not Required
# def authenticate_with_user_password(self, user, password, project=None, remote=None):
# Not Required
# def authenticate_with_token(self, token, project=None, remote=None):
# Not Required
# def get_user_project_list(self, token):
# Not Required
# def get_user_role_list(self, token):
# Not Required
# def create_user(self, user, password):
# Not Required
# def change_password(self, user, new_password):
# Not Required
# def delete_user(self, user_id):
# Not Required
# def get_user_list(self, filter_q={}):
# Not Required
# def get_project_list(self, filter_q={}):
# Not required
# def create_project(self, project):
# Not required
# def delete_project(self, project_id):
# Not required
# def assign_role_to_user(self, user, project, role):
# Not required in Phase 1
# def remove_role_from_user(self, user, project, role):
# Not required in Phase 1
def validate_token(self, token):
"""
Check if the token is valid.
:param token: token to validate
:return: dictionary with information associated with the token:
"_id": token id
"project_id": project id
"project_name": project name
"user_id": user id
"username": user name
"roles": list with dict containing {name, id}
"expires": expiration date
If the token is not valid an exception is raised.
"""
try:
if not token:
raise AuthException("Needed a token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
# try to get from cache first
now = time()
token_info = self.token_cache.get(token)
if token_info and token_info["expires"] < now:
                # delete token. MUST be done with care, as another thread may have already deleted it. Do not use del
self.token_cache.pop(token, None)
token_info = None
# get from database if not in cache
if not token_info:
token_info = self.db.get_one("tokens", {"_id": token})
if token_info["expires"] < now:
raise AuthException("Expired Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
return token_info
except DbException as e:
if e.http_code == HTTPStatus.NOT_FOUND:
raise AuthException("Invalid Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)
else:
raise
except AuthException:
if self.config["global"].get("test.user_not_authorized"):
return {"id": "fake-token-id-for-test",
"project_id": self.config["global"].get("test.project_not_authorized", "admin"),
"username": self.config["global"]["test.user_not_authorized"], "admin": True}
else:
raise
except Exception:
self.logger.exception("Error during token validation using internal backend")
raise AuthException("Error during token validation using internal backend",
http_code=HTTPStatus.UNAUTHORIZED)
def revoke_token(self, token):
"""
Invalidate a token.
:param token: token to be revoked
"""
try:
self.token_cache.pop(token, None)
self.db.del_one("tokens", {"_id": token})
return True
except DbException as e:
if e.http_code == HTTPStatus.NOT_FOUND:
r | else:
# raise
msg = "Error during token revocation using internal backend"
self.logger.exception(msg)
raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)
def authenticate(self, user, password, project=None, token_info=None):
"""
        Authenticate a user using username/password or a previous token_info plus project; it creates a new token
:param user: user: name, id or None
:param password: password or None
        :param project: name, id, or None. If None, the first project found will be used to get a scoped token
:param token_info: previous token_info to obtain authorization
:param remote: remote host information
:return: the scoped token info or raises an exception. The token is a dictionary with:
_id: token string id,
username: username,
project_id: scoped_token project_id,
project_name: scoped_token project_name,
expires: epoch time when it expires,
"""
now = time()
user_content = None
try:
# Try using username/password
if user:
user_rows = self.db.get_list("users", {BaseTopic.id_field("users", user): user})
if user_rows:
user_content = user_rows[0]
salt = user_content["_admin"]["salt"]
shadow_password = sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
if shadow_password != user_content["password"]:
user_content = None
if not user_content:
raise AuthException("Invalid username/password", http_code=HTTPStatus.UNAUTHORIZED)
elif token_info:
user_rows = self.db.get_list("users", {"username": token_info["username"]})
if user_rows:
user_content = user_rows[0]
else:
raise AuthException("Invalid token", http_code=HTTPStatus.UNAUTHORIZED)
else:
raise AuthException("Provide credentials: username/password or Authorization Bearer token",
http_code=HTTPStatus.UNAUTHORIZED)
token_id = ''.join(random_choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
for _ in range(0, 32))
            # TODO when the user contains project_role_mappings with project_id, project_name, this check against
            # the database will not be needed
if not project:
project = user_content["projects"][0]
# To allow project names in project_id
proj = self.db.get_one("projects", {BaseTopic.id_field("projects", project): project})
if proj["_id"] not in user_content["projects"] and proj["name"] not in user_content["projects"]:
raise AuthException("project {} not allowed for this user".format(project),
http_code=HTTPStatus.UNAUTHORIZED)
            # TODO remove admin, this will be used by roles RBAC
if proj["name"] == "admin":
token_admin = True
else:
token_admin = proj.get("admin", False)
# TODO add token roles - PROVISIONAL. Get this list from user_content["project_role_mappings"]
role_id = self.db.get_one("roles", {"name": "system_admin"})["_id"]
roles_list = [{"name": "system_admin", "id": role_id}]
new_token = {"issued_at": now,
"expires": now + 3600,
"_id": token_id,
"id": token_id,
"project_id": proj["_id"],
"project_name": proj["name"],
"username": user_content["username"],
"user_id": user_content["_id"],
"admin": token_admin,
"roles": roles_list,
}
self.token_cache[token_id] = new_token
self.db.create("tokens", new_token)
return deepcopy(new_token)
except Exception as e:
msg = "Error during user authentication using internal backend: {}".format(e)
self.logger.exception(msg)
raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)
def get_role_list(self):
"""
Get role list.
:return: returns the list of roles.
"""
try:
role_list = self.db.get_list("roles")
roles = [{"name": role["name"], "_ | aise AuthException("Token '{}' not found".format(token), http_code=HTTPStatus.NOT_FOUND)
| conditional_block |
setup-cluster-images.py | | sudo apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update -y
sudo apt-get install -y policykit-1 docker-ce
setup_machine_id
sudo dphys-swapfile swapoff
sudo dphys-swapfile uninstall
sudo update-rc.d dphys-swapfile remove
echo "Getting kubernetes packages"
sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
sudo /usr/bin/raspi-config --expand-rootfs
"""
SETUP_SCRIPT = """
if [[ -e /boot/setup.txt ]] ; then
tmp=`mktemp`
mv /boot/setup.txt "$tmp"
sh -x "/%s" "$tmp" >/boot/setup.log 2>&1
rm -f "$tmp"
fi
""" % SETUP_NODE_SH
def absjoin(*params):
return abspath(join(*params))
# FIXME - add comments to the methods
class ClusterSetup:
def __call__(self, archive, node_names, targetdir, ipbase):
targetinfo = stat(targetdir)
with self._mktemp():
info('Download cfssl')
cfssldir = abspath('cfssl')
self._download_cfssl(cfssldir)
ipaddress = ipbase
for name in node_names:
node_image = absjoin(targetdir, '%s.img' % name)
info('prepare image for node %s in %s' % (name, node_image))
info('Unpacking archive %s' % archive)
self._unzip(archive, node_image)
try:
self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir)
except Exception as e:
unlink(node_image)
raise
chown(node_image, targetinfo.st_uid, targetinfo.st_gid)
ipaddress = self._increment_ip(ipaddress)
info('done')
def _setup_cgroups(self):
        debug('setup cgroups in %s' % getcwd())
with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:
cmdline.write('cgroup_enable=cpuset cgroup_memory=1')
def _enable_ssh(self):
debug('enable ssh in %s' % getcwd())
with open(absjoin('boot', 'ssh'), 'w') as ssh:
ssh.write('')
def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):
with self._mount(image):
self._setup_nodename(master, nodename)
self._enable_ssh()
self._setup_cgroups()
debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))
self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))
self._init_first_boot(ipadddress, nodename)
def _copytree(self, srcdir, dstdir):
for f in listdir(srcdir):
copy2(absjoin(srcdir, f), dstdir)
def _setup_nodename(self, master, nodename):
debug('setup nodename %s in %s' % (nodename, getcwd()))
with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:
print(nodename, file=hostname)
with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:
print('127.0.1.1 %(nodename)s' % locals(), file=hosts)
if nodename != master:
print('10.0.0.1 %(master)s' % locals(), file=hosts)
def _init_first_boot(self, ipadddress, nodename):
debug('Prepare first boot in %s' % getcwd())
with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:
self.create_setup_script(fname)
with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:
self.setup_rclocal(rclocal)
self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename)
def create_setup_script(self, setup_node_sh):
with open(setup_node_sh, 'x') as setup_node:
print(PKG_SETUP % locals(), file=setup_node)
def setup_rclocal(self, rc_local):
with open(rc_local, 'r+') as script:
script.write(self._edit(script.read(), SETUP_SCRIPT))
def _create_setup_txt(self, fname, ipadddress, nodename):
with open(fname, 'w') as setup:
print('nodename=%s' % nodename, file=setup)
print('ip=%s' % ipadddress, file=setup)
def _edit(self, setup_script, setup_node_sh):
lines = [l.rstrip() for l in setup_script.splitlines()]
if 'exit 0' in lines:
exit_line = lines.index('exit 0')
lines.insert(exit_line, setup_node_sh)
else:
lines.append(setup_node_sh)
lines.append('exit 0')
return '\n'.join(lines)
def _download_cfssl(self, dstdir):
if not isdir(dstdir):
makedirs(dstdir)
for line in CFSSL_PROGS_SHA256.splitlines():
if line:
checksum, fname = line.split()
dstfile = absjoin(dstdir, fname)
self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname, dstfile, checksum)
chmod(dstfile, 0o755)
def _download(self, url, dstfile, checksum):
request.urlretrieve(url, dstfile)
m = sha256()
with open(dstfile, 'rb') as f:
hash = m.update(f.read())
if checksum != m.hexdigest():
raise RuntimeError('Checksum of %s does not match!' % dstfile)
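# A standalone sketch of the checksum-verified download done by _download() above;
# the helper name is made up, and the chunked read is an addition so that large
# files need not be held in memory at once.
from hashlib import sha256
from urllib import request

def download_verified(url, dstfile, expected_sha256):
    request.urlretrieve(url, dstfile)
    digest = sha256()
    with open(dstfile, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    if digest.hexdigest() != expected_sha256:
        raise RuntimeError('Checksum of %s does not match!' % dstfile)
    return dstfile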
@staticmethod
def _unzip(archive, dst_image):
with ZipFile(archive) as image_archive:
for name in image_archive.namelist():
if name.endswith('.img'):
image = image_archive.extract(name, dirname(dst_image))
if isfile(dst_image):
unlink(dst_image)
rename(image, dst_image)
return dst_image
raise RuntimeError('No image file contained in archive %s' % archive)
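# A small self-contained check of the "find the .img member" rule used by _unzip()
# above, exercised against a throwaway archive built in a temporary directory; the
# file names below are placeholders.
import os
from tempfile import TemporaryDirectory
from zipfile import ZipFile

def first_img_member(archive_path):
    with ZipFile(archive_path) as zf:
        for name in zf.namelist():
            if name.endswith('.img'):
                return name
    raise RuntimeError('No image file contained in archive %s' % archive_path)

if __name__ == "__main__":
    with TemporaryDirectory() as tmp:
        archive = os.path.join(tmp, 'raspbian.zip')
        with ZipFile(archive, 'w') as zf:
            zf.writestr('2018-03-13-raspbian-stretch-lite.img', b'\x00' * 16)
        assert first_img_member(archive) == '2018-03-13-raspbian-stretch-lite.img'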
@contextmanager
def _mktemp(self):
here = getcwd()
tempdir = mkdtemp()
try:
chdir(tempdir)
yield tempdir, here
finally:
chdir(here)
rmtree(tempdir)
@contextmanager
def _mount(self, image):
with self._kpartx(abspath(image)) as nodes:
with self._mktemp() as (here, cwd):
for d in nodes.keys():
mkdir(d)
boot = abspath('boot')
system = abspath('system')
with self._mounted(nodes['boot'], boot) as boot:
with self._mounted(nodes['system'], system) as system:
chdir(here)
yield boot, system
@contextmanager
def _kpartx(self, image):
output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image), universal_newlines=True)
# $ sudo kpartx -a -v -s 2018-03-13-raspbian-stretch-lite.img
# add map loop1p1 (252:7): 0 85611 linear /dev/loop1 8192
# add map loop1p2 (252:8): 0 3530752 linear /dev/loop1 98304
try:
nodes = []
for l in output.splitlines():
if l:
fields = l.split()
nodes.append((fields[2], fields[5]))
assert len(nodes) == 2
# sort nodes by size - the smaller node is 'boot'
nodes.sort(key=lambda t: t[1], reverse=True)
yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': '/dev/mapper/%s' % nodes[1][0]}
finally:
check_call(('sudo', 'kpartx', '-d', '-s', image))
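# A simplified, pure-function take on the kpartx output parsing done above, using the
# two sample lines quoted in the comment. Unlike the method, sizes are compared as
# integers here and the smaller partition is treated as /boot; the function name is
# made up for illustration.
def parse_kpartx_output(output):
    nodes = []
    for line in output.splitlines():
        if line:
            fields = line.split()
            nodes.append((fields[2], int(fields[5])))
    nodes.sort(key=lambda t: t[1])  # ascending by size: boot partition first
    return {'boot': '/dev/mapper/%s' % nodes[0][0],
            'system': '/dev/mapper/%s' % nodes[1][0]}

if __name__ == "__main__":
    sample = ('add map loop1p1 (252:7): 0 85611 linear /dev/loop1 8192\n'
              'add map loop1p2 (252:8): 0 3530752 linear /dev/loop1 98304\n')
    assert parse_kpartx_output(sample) == {'boot': '/dev/mapper/loop1p1',
                                           'system': '/dev/mapper/loop1p2'}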
@contextmanager
def _mounted(self, mapping, mountpoint):
try:
debug('mount %s on %s' % (mapping, mountpoint))
check_call(('sudo', 'mount', mapping, mountpoint))
yield mountpoint
finally:
check_call(('sudo', 'umount', mountpoint))
@contextmanager
def _executable(self, param):
yield param
chmod(param, 0o755)
def _increment_ip(self, ipbase):
octets = [int(o) for o in ipbase.split('.')]
octets[3] += 1
return '.'.join([str(o) for o in octets])
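# _increment_ip() above bumps the last octet by hand. As an aside, the standard
# library's ipaddress module can perform the same step with validation and octet
# carry; this is only a possible alternative, not what the script above does.
from ipaddress import IPv4Address

def next_ip(ip_string):
    return str(IPv4Address(ip_string) + 1)

if __name__ == "__main__":
    assert next_ip('10.0.0.1') == '10.0.0.2'
    assert next_ip('10.0.0.255') == '10.0.1.0'  # carries into the third octet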
def _check_ip(param):
octets = [int(o) for o in param.split('.')]
for o in octets:
if 0 <= o <= 255:
| continue | conditional_block |
|
setup-cluster-images.py | -e /boot/setup.txt ]] ; then
tmp=`mktemp`
mv /boot/setup.txt "$tmp"
sh -x "/%s" "$tmp" >/boot/setup.log 2>&1
rm -f "$tmp"
fi
""" % SETUP_NODE_SH
def absjoin(*params):
return abspath(join(*params))
# FIXME - add comments to the methods
class ClusterSetup:
def __call__(self, archive, node_names, targetdir, ipbase):
targetinfo = stat(targetdir)
with self._mktemp():
info('Download cfssl')
cfssldir = abspath('cfssl')
self._download_cfssl(cfssldir)
ipaddress = ipbase
for name in node_names:
node_image = absjoin(targetdir, '%s.img' % name)
info('prepare image for node %s in %s' % (name, node_image))
info('Unpacking archive %s' % archive)
self._unzip(archive, node_image)
try:
self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir)
except Exception as e:
unlink(node_image)
raise
chown(node_image, targetinfo.st_uid, targetinfo.st_gid)
ipaddress = self._increment_ip(ipaddress)
info('done')
def _setup_cgroups(self):
        debug('setup cgroups in %s' % getcwd())
with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:
cmdline.write('cgroup_enable=cpuset cgroup_memory=1')
def _enable_ssh(self):
debug('enable ssh in %s' % getcwd())
with open(absjoin('boot', 'ssh'), 'w') as ssh:
ssh.write('')
def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):
with self._mount(image):
self._setup_nodename(master, nodename)
self._enable_ssh()
self._setup_cgroups()
debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))
self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))
self._init_first_boot(ipadddress, nodename)
def _copytree(self, srcdir, dstdir):
for f in listdir(srcdir):
copy2(absjoin(srcdir, f), dstdir)
def _setup_nodename(self, master, nodename):
debug('setup nodename %s in %s' % (nodename, getcwd()))
with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:
print(nodename, file=hostname)
with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:
print('127.0.1.1 %(nodename)s' % locals(), file=hosts)
if nodename != master:
print('10.0.0.1 %(master)s' % locals(), file=hosts)
def _init_first_boot(self, ipadddress, nodename):
debug('Prepare first boot in %s' % getcwd())
with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:
self.create_setup_script(fname)
with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:
self.setup_rclocal(rclocal)
self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename)
def create_setup_script(self, setup_node_sh):
with open(setup_node_sh, 'x') as setup_node:
print(PKG_SETUP % locals(), file=setup_node)
def setup_rclocal(self, rc_local):
with open(rc_local, 'r+') as script:
script.write(self._edit(script.read(), SETUP_SCRIPT))
def _create_setup_txt(self, fname, ipadddress, nodename):
with open(fname, 'w') as setup:
print('nodename=%s' % nodename, file=setup)
print('ip=%s' % ipadddress, file=setup)
def _edit(self, setup_script, setup_node_sh):
lines = [l.rstrip() for l in setup_script.splitlines()]
if 'exit 0' in lines:
exit_line = lines.index('exit 0')
lines.insert(exit_line, setup_node_sh)
else:
lines.append(setup_node_sh)
lines.append('exit 0')
return '\n'.join(lines)
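# A tiny standalone demonstration of the splicing rule used by _edit() above: the
# first-boot hook is inserted just before the final "exit 0" line, or appended
# together with it when that line is missing. The hook line used here is a stand-in.
def splice_before_exit(script_text, hook_line):
    lines = [l.rstrip() for l in script_text.splitlines()]
    if 'exit 0' in lines:
        lines.insert(lines.index('exit 0'), hook_line)
    else:
        lines.extend([hook_line, 'exit 0'])
    return '\n'.join(lines)

if __name__ == "__main__":
    rc_local = '#!/bin/sh\n# distro boilerplate\nexit 0'
    patched = splice_before_exit(rc_local, 'sh -x /setup-node.sh')
    assert patched.splitlines()[-2:] == ['sh -x /setup-node.sh', 'exit 0']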
def _download_cfssl(self, dstdir):
if not isdir(dstdir):
makedirs(dstdir)
for line in CFSSL_PROGS_SHA256.splitlines():
if line:
checksum, fname = line.split()
dstfile = absjoin(dstdir, fname)
self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname, dstfile, checksum)
chmod(dstfile, 0o755)
def _download(self, url, dstfile, checksum):
request.urlretrieve(url, dstfile)
m = sha256()
with open(dstfile, 'rb') as f:
hash = m.update(f.read())
if checksum != m.hexdigest():
raise RuntimeError('Checksum of %s does not match!' % dstfile)
@staticmethod
def _unzip(archive, dst_image):
with ZipFile(archive) as image_archive:
for name in image_archive.namelist():
if name.endswith('.img'):
image = image_archive.extract(name, dirname(dst_image))
if isfile(dst_image):
unlink(dst_image)
rename(image, dst_image)
return dst_image
raise RuntimeError('No image file contained in archive %s' % archive)
@contextmanager
def _mktemp(self):
here = getcwd()
tempdir = mkdtemp()
try:
chdir(tempdir)
yield tempdir, here
finally:
chdir(here)
rmtree(tempdir)
@contextmanager
def _mount(self, image):
with self._kpartx(abspath(image)) as nodes:
with self._mktemp() as (here, cwd):
for d in nodes.keys():
mkdir(d)
boot = abspath('boot')
system = abspath('system')
with self._mounted(nodes['boot'], boot) as boot:
with self._mounted(nodes['system'], system) as system:
chdir(here)
yield boot, system
@contextmanager
def _kpartx(self, image):
output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image), universal_newlines=True)
# $ sudo kpartx -a -v -s 2018-03-13-raspbian-stretch-lite.img
# add map loop1p1 (252:7): 0 85611 linear /dev/loop1 8192
# add map loop1p2 (252:8): 0 3530752 linear /dev/loop1 98304
try:
nodes = []
for l in output.splitlines():
if l:
fields = l.split()
nodes.append((fields[2], fields[5]))
assert len(nodes) == 2
# sort nodes by size - the smaller node is 'boot'
nodes.sort(key=lambda t: t[1], reverse=True)
yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': '/dev/mapper/%s' % nodes[1][0]}
finally:
check_call(('sudo', 'kpartx', '-d', '-s', image))
@contextmanager
def _mounted(self, mapping, mountpoint):
try:
debug('mount %s on %s' % (mapping, mountpoint))
check_call(('sudo', 'mount', mapping, mountpoint))
yield mountpoint
finally:
check_call(('sudo', 'umount', mountpoint))
@contextmanager
def _executable(self, param):
yield param
chmod(param, 0o755)
def _increment_ip(self, ipbase):
octets = [int(o) for o in ipbase.split('.')]
octets[3] += 1
return '.'.join([str(o) for o in octets])
def _check_ip(param):
octets = [int(o) for o in param.split('.')]
for o in octets:
if 0 <= o <= 255:
continue
raise RuntimeError('Invalid IP address: %s' % param)
return param
def main(*args):
| targetdir = getcwd() if len(args) < 4 else args[3]
nodenames = prepare_names(
NODE_COUNT if len(args) < 2 else int(args[1]),
NODE_PREFIX if len(args) < 3 else args[2])
ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4])
raspbian_archive = abspath(args[0])
setup = ClusterSetup()
setup(raspbian_archive, nodenames, targetdir, ipaddress) | identifier_body |
|
setup-cluster-images.py | e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo
4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey
71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan
11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl
e138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson
dac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle
d53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca
"""
# Shell script to set up the necessary software for kubernetes
# FIXME - how to add a static IP
# TODO - add static certificates
# TODO - add kubeadm call for master
PKG_SETUP = """\
#!/bin/sh
setup_params="$1"
setup_machine_id() {
sudo rm -f /etc/machine-id /var/lib/dbus/machine-id
sudo dbus-uuidgen --ensure=/etc/machine-id
}
setup_static_ip() {
}
set -e
nodename=`awk -F= '/^nodename=/ { print $2 }' "$setup_params"`
ipaddress=`awk -F= '/^ip=/ { print $2 }' "$setup_params"`
sudo hostname "$nodename"
setup_static_ip "$ipaddress"
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update -y
sudo apt-get install -y policykit-1 docker-ce
setup_machine_id
sudo dphys-swapfile swapoff
sudo dphys-swapfile uninstall
sudo update-rc.d dphys-swapfile remove
echo "Getting kubernetes packages"
sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
sudo /usr/bin/raspi-config --expand-rootfs
"""
SETUP_SCRIPT = """
if [[ -e /boot/setup.txt ]] ; then
tmp=`mktemp`
mv /boot/setup.txt "$tmp"
sh -x "/%s" "$tmp" >/boot/setup.log 2>&1
rm -f "$tmp"
fi
""" % SETUP_NODE_SH
def absjoin(*params):
return abspath(join(*params))
# FIXME - add comments to the methods
class ClusterSetup:
def __call__(self, archive, node_names, targetdir, ipbase):
targetinfo = stat(targetdir)
with self._mktemp():
info('Download cfssl')
cfssldir = abspath('cfssl')
self._download_cfssl(cfssldir)
ipaddress = ipbase
for name in node_names:
node_image = absjoin(targetdir, '%s.img' % name)
info('prepare image for node %s in %s' % (name, node_image))
info('Unpacking archive %s' % archive)
self._unzip(archive, node_image)
try:
self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir)
except Exception as e:
unlink(node_image)
raise
chown(node_image, targetinfo.st_uid, targetinfo.st_gid)
ipaddress = self._increment_ip(ipaddress)
info('done')
def _setup_cgroups(self):
        debug('setup cgroups in %s' % getcwd())
with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:
cmdline.write('cgroup_enable=cpuset cgroup_memory=1')
def _enable_ssh(self):
debug('enable ssh in %s' % getcwd())
with open(absjoin('boot', 'ssh'), 'w') as ssh:
ssh.write('')
def | (self, image, nodename, master, ipadddress, cfssl):
with self._mount(image):
self._setup_nodename(master, nodename)
self._enable_ssh()
self._setup_cgroups()
debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))
self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))
self._init_first_boot(ipadddress, nodename)
def _copytree(self, srcdir, dstdir):
for f in listdir(srcdir):
copy2(absjoin(srcdir, f), dstdir)
def _setup_nodename(self, master, nodename):
debug('setup nodename %s in %s' % (nodename, getcwd()))
with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:
print(nodename, file=hostname)
with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:
print('127.0.1.1 %(nodename)s' % locals(), file=hosts)
if nodename != master:
print('10.0.0.1 %(master)s' % locals(), file=hosts)
def _init_first_boot(self, ipadddress, nodename):
debug('Prepare first boot in %s' % getcwd())
with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:
self.create_setup_script(fname)
with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:
self.setup_rclocal(rclocal)
self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename)
def create_setup_script(self, setup_node_sh):
with open(setup_node_sh, 'x') as setup_node:
print(PKG_SETUP % locals(), file=setup_node)
def setup_rclocal(self, rc_local):
with open(rc_local, 'r+') as script:
script.write(self._edit(script.read(), SETUP_SCRIPT))
def _create_setup_txt(self, fname, ipadddress, nodename):
with open(fname, 'w') as setup:
print('nodename=%s' % nodename, file=setup)
print('ip=%s' % ipadddress, file=setup)
def _edit(self, setup_script, setup_node_sh):
lines = [l.rstrip() for l in setup_script.splitlines()]
if 'exit 0' in lines:
exit_line = lines.index('exit 0')
lines.insert(exit_line, setup_node_sh)
else:
lines.append(setup_node_sh)
lines.append('exit 0')
return '\n'.join(lines)
def _download_cfssl(self, dstdir):
if not isdir(dstdir):
makedirs(dstdir)
for line in CFSSL_PROGS_SHA256.splitlines():
if line:
checksum, fname = line.split()
dstfile = absjoin(dstdir, fname)
self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname, dstfile, checksum)
chmod(dstfile, 0o755)
def _download(self, url, dstfile, checksum):
request.urlretrieve(url, dstfile)
m = sha256()
with open(dstfile, 'rb') as f:
hash = m.update(f.read())
if checksum != m.hexdigest():
raise RuntimeError('Checksum of %s does not match!' % dstfile)
@staticmethod
def _unzip(archive, dst_image):
with ZipFile(archive) as image_archive:
for name in image_archive.namelist():
if name.endswith('.img'):
image = image_archive.extract(name, dirname(dst_image))
if isfile(dst_image):
unlink(dst_image)
rename(image, dst_image)
return dst_image
raise RuntimeError('No image file contained in archive %s' % archive)
@contextmanager
def _mktemp(self):
here = getcwd()
tempdir = mkdtemp()
try:
chdir(temp | _prepare_node_image | identifier_name |
setup-cluster-images.py | e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo
4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey
71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan
11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl
e138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson
dac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle
d53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca
"""
# Shell script to set up the necessary software for kubernetes
# FIXME - how to add a static IP
# TODO - add static certificates
# TODO - add kubeadm call for master
PKG_SETUP = """\
#!/bin/sh
setup_params="$1"
setup_machine_id() {
sudo rm -f /etc/machine-id /var/lib/dbus/machine-id
sudo dbus-uuidgen --ensure=/etc/machine-id
}
setup_static_ip() {
}
set -e
nodename=`awk -F= '/^nodename=/ { print $2 }' "$setup_params"`
ipaddress=`awk -F= '/^ip=/ { print $2 }' "$setup_params"`
sudo hostname "$nodename"
setup_static_ip "$ipaddress"
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update -y
sudo apt-get install -y policykit-1 docker-ce
setup_machine_id
sudo dphys-swapfile swapoff
sudo dphys-swapfile uninstall
sudo update-rc.d dphys-swapfile remove
echo "Getting kubernetes packages"
sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
sudo /usr/bin/raspi-config --expand-rootfs
"""
SETUP_SCRIPT = """
if [[ -e /boot/setup.txt ]] ; then
tmp=`mktemp`
mv /boot/setup.txt "$tmp"
sh -x "/%s" "$tmp" >/boot/setup.log 2>&1
rm -f "$tmp"
fi
""" % SETUP_NODE_SH
def absjoin(*params):
return abspath(join(*params))
# FIXME - add comments to the methods
class ClusterSetup:
def __call__(self, archive, node_names, targetdir, ipbase):
targetinfo = stat(targetdir)
with self._mktemp():
info('Download cfssl')
cfssldir = abspath('cfssl')
self._download_cfssl(cfssldir)
ipaddress = ipbase
for name in node_names:
node_image = absjoin(targetdir, '%s.img' % name)
info('prepare image for node %s in %s' % (name, node_image))
info('Unpacking archive %s' % archive)
self._unzip(archive, node_image)
try:
self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir)
except Exception as e:
unlink(node_image)
raise
chown(node_image, targetinfo.st_uid, targetinfo.st_gid) | def _setup_cgroups(self):
debug('setup cgrops in %s' % getcwd())
with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:
cmdline.write('cgroup_enable=cpuset cgroup_memory=1')
def _enable_ssh(self):
debug('enable ssh in %s' % getcwd())
with open(absjoin('boot', 'ssh'), 'w') as ssh:
ssh.write('')
def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):
with self._mount(image):
self._setup_nodename(master, nodename)
self._enable_ssh()
self._setup_cgroups()
debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))
self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))
self._init_first_boot(ipadddress, nodename)
def _copytree(self, srcdir, dstdir):
for f in listdir(srcdir):
copy2(absjoin(srcdir, f), dstdir)
def _setup_nodename(self, master, nodename):
debug('setup nodename %s in %s' % (nodename, getcwd()))
with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:
print(nodename, file=hostname)
with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:
print('127.0.1.1 %(nodename)s' % locals(), file=hosts)
if nodename != master:
print('10.0.0.1 %(master)s' % locals(), file=hosts)
def _init_first_boot(self, ipadddress, nodename):
debug('Prepare first boot in %s' % getcwd())
with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:
self.create_setup_script(fname)
with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:
self.setup_rclocal(rclocal)
self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename)
def create_setup_script(self, setup_node_sh):
with open(setup_node_sh, 'x') as setup_node:
print(PKG_SETUP % locals(), file=setup_node)
def setup_rclocal(self, rc_local):
with open(rc_local, 'r+') as script:
script.write(self._edit(script.read(), SETUP_SCRIPT))
def _create_setup_txt(self, fname, ipadddress, nodename):
with open(fname, 'w') as setup:
print('nodename=%s' % nodename, file=setup)
print('ip=%s' % ipadddress, file=setup)
def _edit(self, setup_script, setup_node_sh):
lines = [l.rstrip() for l in setup_script.splitlines()]
if 'exit 0' in lines:
exit_line = lines.index('exit 0')
lines.insert(exit_line, setup_node_sh)
else:
lines.append(setup_node_sh)
lines.append('exit 0')
return '\n'.join(lines)
def _download_cfssl(self, dstdir):
if not isdir(dstdir):
makedirs(dstdir)
for line in CFSSL_PROGS_SHA256.splitlines():
if line:
checksum, fname = line.split()
dstfile = absjoin(dstdir, fname)
self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname, dstfile, checksum)
chmod(dstfile, 0o755)
def _download(self, url, dstfile, checksum):
request.urlretrieve(url, dstfile)
m = sha256()
with open(dstfile, 'rb') as f:
hash = m.update(f.read())
if checksum != m.hexdigest():
raise RuntimeError('Checksum of %s does not match!' % dstfile)
@staticmethod
def _unzip(archive, dst_image):
with ZipFile(archive) as image_archive:
for name in image_archive.namelist():
if name.endswith('.img'):
image = image_archive.extract(name, dirname(dst_image))
if isfile(dst_image):
unlink(dst_image)
rename(image, dst_image)
return dst_image
raise RuntimeError('No image file contained in archive %s' % archive)
@contextmanager
def _mktemp(self):
here = getcwd()
tempdir = mkdtemp()
try:
chdir(temp | ipaddress = self._increment_ip(ipaddress)
info('done')
| random_line_split |
lib.rs | Exonum nodes.
//!
//! `exonum-cli` supports a multi-stage configuration process designed with safety in mind. It involves
//! 4 steps (or stages) and allows you to configure and run multiple blockchain nodes without
//! the need to exchange private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate common (template) part of the nodes configuration using `generate-template` command.
//! Generated `.toml` file must be spread among all the nodes and must be used in the following
//! configuration step.
//! 2. Generate public and secret (private) parts of the node configuration using `generate-config`
//! command. At this step, Exonum will generate master key from which consensus and service
//! validator keys are derived. Master key is stored in the encrypted file. Consensus secret key
//! is used for communications between the nodes, while service secret key is used
//! mainly to sign transactions generated by the node. Both secret keys may be encrypted with a
//! password. The public part of the node configuration must be spread among all nodes, while the
//! secret part must be accessible by the node administrator only.
//! 3. Generate final node configuration using `finalize` command. Exonum combines secret part of
//! the node configuration with public configurations of every other node, producing a single
//! configuration file with all the necessary node and network settings.
//! 4. Use `run` command and provide it with final node configuration file produced at the previous
//! step. If the secret keys are protected with passwords, the user needs to enter the password.
//! Running node will automatically connect to other nodes in the network using IP addresses from
//! public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * `run-dev` command automatically generates network configuration with a single node and runs
//! it. This command can be useful for fast testing of the services during development process.
//! * `maintenance` command allows to clear node's consensus messages with `clear-cache`, and
//! restart node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows you to extend the list of parameters for any command and even add new CLI
//! commands with arbitrary behavior. To do so, you need to implement a structure with a list of
//! additional parameters and use `flatten` macro attribute of [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//! #[serde(flatten)]
//! #[structopt(flatten)]
//!     default: Run,
//! /// My awesome parameter
//! secret_number: i32
//! }
//! ```
//!
//! You can also create your own list of commands by implementing an enum following a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//!     #[structopt(name = "run")]
//!     DefaultRun(Run),
//!     #[structopt(name = "my-run")]
//!     MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt
#![deny(missing_docs)]
pub use crate::config_manager::DefaultConfigManager;
pub use structopt;
use exonum::{
blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
merkledb::RocksDB,
runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use tempfile::TempDir;
use std::{env, ffi::OsString, iter, path::PathBuf};
use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};
pub mod command;
pub mod config;
pub mod io;
pub mod password;
mod config_manager;
/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
rust_runtime: RustRuntimeBuilder,
external_runtimes: Vec<RuntimeInstance>,
builtin_instances: Vec<InstanceInitParams>,
args: Option<Vec<OsString>>,
temp_dir: Option<TempDir>,
}
impl Default for NodeBuilder {
fn default() -> Self {
Self::new()
}
}
impl NodeBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {
rust_runtime: RustRuntimeBuilder::new()
.with_factory(Supervisor)
.with_factory(ExplorerFactory),
external_runtimes: vec![],
builtin_instances: vec![],
args: None,
temp_dir: None,
}
}
/// Creates a new builder with the provided command-line arguments. The path
/// to the current executable **does not** need to be specified as the first argument.
#[doc(hidden)] // unstable
pub fn with_args<I>(args: I) -> Self
where
I: IntoIterator,
I::Item: Into<OsString>,
{
let mut this = Self::new();
let executable = env::current_exe()
.map(PathBuf::into_os_string)
.unwrap_or_else(|_| "node".into());
let all_args = iter::once(executable)
.chain(args.into_iter().map(Into::into))
.collect();
this.args = Some(all_args);
this
}
/// Creates a single-node development network with default settings. The node stores
/// its data in a temporary directory, which is automatically removed when the node is stopped.
///
/// # Return value
///
/// Returns an error if the temporary directory cannot be created.
pub fn development_node() -> Result<Self, failure::Error> {
let temp_dir = TempDir::new()?;
let mut this = Self::with_args(vec![
OsString::from("run-dev"),
OsString::from("--artifacts-dir"),
temp_dir.path().into(),
]);
this.temp_dir = Some(temp_dir);
Ok(this)
}
/// Adds new Rust service to the list of available services.
pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self {
self.rust_runtime = self.rust_runtime.with_factory(service);
self
}
/// Adds a new `Runtime` to the list of available runtimes.
///
/// Note that you don't have to add the Rust runtime, since it is included by default.
pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self {
self.external_runtimes.push(runtime.into());
self
}
/// Adds a service instance that will be available immediately after creating a genesis block.
///
/// For Rust services, the service factory needs to be separately supplied
/// via [`with_rust_service`](#method.with_rust_service).
pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self {
self.builtin_instances.push(instance.into());
self
}
/// Adds a default Rust service instance that will be available immediately after creating a
/// genesis block.
pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self {
self.with_instance(service.default_instance())
.with_rust_service(service)
}
/// Executes a command received from the command line.
///
/// # Return value
///
/// Returns:
///
/// - `Ok(Some(_))` if the command lead to the node creation
/// - `Ok(None)` if the command executed successfully and did not lead to node creation
/// - `Err(_)` if an error occurred during command execution
#[doc(hidden)] // unstable
pub fn execute_command(self) -> Result<Option<Node>, failure::Error> {
let command = if let Some(args) = self.args | else {
Command::from_args()
};
if let StandardResult::Run(run_config) = command.execute()? {
let genesis_config = Self::genesis_config(&run_config, self.builtin_instances);
let db_options = &run_config.node_config.private_config.database;
let database = RocksDB::open(run_config.db_path, db_options)?;
let node_config_path = run_config.node_config_path.to_string_lossy();
let config_manager = DefaultConfigManager::new(node_config_path.into_owned());
let rust_runtime = self.rust_runtime;
let node_config = run_config.node_config.into();
let node_keys = run_config.node_keys;
let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys)
.with_genesis_config(genesis_config)
.with_config_manager(config_manager)
.with_plugin(SystemApiPlugin)
.with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender()));
for runtime in self.external_runtimes {
node_builder | {
Command::from_iter(args)
} | conditional_block |
lib.rs | Exonum nodes.
//!
//! `exonum-cli` supports a multi-stage configuration process designed with safety in mind. It involves
//! 4 steps (or stages) and allows you to configure and run multiple blockchain nodes without
//! the need to exchange private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate common (template) part of the nodes configuration using `generate-template` command.
//! Generated `.toml` file must be spread among all the nodes and must be used in the following
//! configuration step.
//! 2. Generate public and secret (private) parts of the node configuration using `generate-config`
//! command. At this step, Exonum will generate master key from which consensus and service
//! validator keys are derived. Master key is stored in the encrypted file. Consensus secret key
//! is used for communications between the nodes, while service secret key is used
//! mainly to sign transactions generated by the node. Both secret keys may be encrypted with a
//! password. The public part of the node configuration must be spread among all nodes, while the
//! secret part must be accessible by the node administrator only.
//! 3. Generate final node configuration using `finalize` command. Exonum combines secret part of
//! the node configuration with public configurations of every other node, producing a single
//! configuration file with all the necessary node and network settings.
//! 4. Use `run` command and provide it with final node configuration file produced at the previous
//! step. If the secret keys are protected with passwords, the user needs to enter the password.
//! Running node will automatically connect to other nodes in the network using IP addresses from
//! public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * `run-dev` command automatically generates network configuration with a single node and runs
//! it. This command can be useful for fast testing of the services during development process.
//! * `maintenance` command allows to clear node's consensus messages with `clear-cache`, and
//! restart node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows you to extend the list of parameters for any command and even add new CLI
//! commands with arbitrary behavior. To do so, you need to implement a structure with a list of
//! additional parameters and use `flatten` macro attribute of [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//! #[serde(flatten)]
//! #[structopt(flatten)]
//!     default: Run,
//! /// My awesome parameter
//! secret_number: i32
//! }
//! ```
//!
//! You can also create your own list of commands by implementing an enum following a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//!     #[structopt(name = "run")]
//!     DefaultRun(Run),
//!     #[structopt(name = "my-run")]
//!     MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt
#![deny(missing_docs)]
pub use crate::config_manager::DefaultConfigManager;
pub use structopt;
use exonum::{
blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
merkledb::RocksDB,
runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use tempfile::TempDir;
use std::{env, ffi::OsString, iter, path::PathBuf};
use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};
pub mod command;
pub mod config;
pub mod io;
pub mod password;
mod config_manager;
/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
rust_runtime: RustRuntimeBuilder,
external_runtimes: Vec<RuntimeInstance>,
builtin_instances: Vec<InstanceInitParams>,
args: Option<Vec<OsString>>,
temp_dir: Option<TempDir>,
}
impl Default for NodeBuilder {
fn default() -> Self {
Self::new()
}
}
impl NodeBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {
rust_runtime: RustRuntimeBuilder::new()
.with_factory(Supervisor)
.with_factory(ExplorerFactory),
external_runtimes: vec![],
builtin_instances: vec![],
args: None,
temp_dir: None,
}
}
/// Creates a new builder with the provided command-line arguments. The path
/// to the current executable **does not** need to be specified as the first argument.
#[doc(hidden)] // unstable
pub fn with_args<I>(args: I) -> Self
where
I: IntoIterator,
I::Item: Into<OsString>,
{
let mut this = Self::new();
let executable = env::current_exe()
.map(PathBuf::into_os_string)
.unwrap_or_else(|_| "node".into());
let all_args = iter::once(executable)
.chain(args.into_iter().map(Into::into))
.collect();
this.args = Some(all_args);
this
}
/// Creates a single-node development network with default settings. The node stores
/// its data in a temporary directory, which is automatically removed when the node is stopped.
///
/// # Return value
///
/// Returns an error if the temporary directory cannot be created.
pub fn development_node() -> Result<Self, failure::Error> |
/// Adds new Rust service to the list of available services.
pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self {
self.rust_runtime = self.rust_runtime.with_factory(service);
self
}
/// Adds a new `Runtime` to the list of available runtimes.
///
/// Note that you don't have to add the Rust runtime, since it is included by default.
pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self {
self.external_runtimes.push(runtime.into());
self
}
/// Adds a service instance that will be available immediately after creating a genesis block.
///
/// For Rust services, the service factory needs to be separately supplied
/// via [`with_rust_service`](#method.with_rust_service).
pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self {
self.builtin_instances.push(instance.into());
self
}
/// Adds a default Rust service instance that will be available immediately after creating a
/// genesis block.
pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self {
self.with_instance(service.default_instance())
.with_rust_service(service)
}
/// Executes a command received from the command line.
///
/// # Return value
///
/// Returns:
///
/// - `Ok(Some(_))` if the command lead to the node creation
/// - `Ok(None)` if the command executed successfully and did not lead to node creation
/// - `Err(_)` if an error occurred during command execution
#[doc(hidden)] // unstable
pub fn execute_command(self) -> Result<Option<Node>, failure::Error> {
let command = if let Some(args) = self.args {
Command::from_iter(args)
} else {
Command::from_args()
};
if let StandardResult::Run(run_config) = command.execute()? {
let genesis_config = Self::genesis_config(&run_config, self.builtin_instances);
let db_options = &run_config.node_config.private_config.database;
let database = RocksDB::open(run_config.db_path, db_options)?;
let node_config_path = run_config.node_config_path.to_string_lossy();
let config_manager = DefaultConfigManager::new(node_config_path.into_owned());
let rust_runtime = self.rust_runtime;
let node_config = run_config.node_config.into();
let node_keys = run_config.node_keys;
let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys)
.with_genesis_config(genesis_config)
.with_config_manager(config_manager)
.with_plugin(SystemApiPlugin)
.with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender()));
for runtime in self.external_runtimes {
node_builder = node_builder.with_runtime(runtime);
lib.rs | //! Helper crate for secure and convenient configuration of the Exonum nodes.
//!
//! `exonum-cli` supports a multi-stage configuration process designed with safety in mind. It involves
//! 4 steps (or stages) and allows you to configure and run multiple blockchain nodes without
//! exchanging private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate the common (template) part of the node configuration using the `generate-template`
//! command. The generated `.toml` file must be spread among all the nodes and used in the following
//! configuration step.
//! 2. Generate the public and secret (private) parts of the node configuration using the `generate-config`
//! command. At this step, Exonum generates a master key from which the consensus and service
//! validator keys are derived. The master key is stored in an encrypted file. The consensus secret key
//! is used for communication between the nodes, while the service secret key is used
//! mainly to sign transactions generated by the node. Both secret keys may be encrypted with a
//! password. The public part of the node configuration must be spread among all nodes, while the
//! secret part must be accessible only to the node administrator.
//! 3. Generate the final node configuration using the `finalize` command. Exonum combines the secret
//! part of the node configuration with the public configurations of every other node, producing a
//! single configuration file with all the necessary node and network settings.
//! 4. Use the `run` command and provide it with the final node configuration file produced at the
//! previous step. If the secret keys are protected with passwords, the user needs to enter the
//! password. The running node will automatically connect to other nodes in the network using IP
//! addresses from the public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * The `run-dev` command automatically generates a network configuration with a single node and runs
//! it. This command can be useful for quickly testing services during the development process.
//! * The `maintenance` command allows clearing the node's consensus messages with `clear-cache`, and
//! restarting the node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows you to extend the list of parameters for any command and even add new CLI
//! commands with arbitrary behavior. To do so, implement a structure with a list of
//! additional parameters and use the `flatten` attribute of the [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//! #[serde(flatten)]
//! #[structopt(flatten)]
//! default: Run,
//! /// My awesome parameter
//! secret_number: i32
//! }
//! ```
//!
//! You can also create your own list of commands by implementing an enum following a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//! #[structopt(name = "run")]
//! DefaultRun(Run),
//! #[structopt(name = "my-run")]
//! MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt
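//!
//! ## Constructing a Node Programmatically
//!
//! As a minimal sketch (assuming `exonum-cli` is a dependency of your binary and `MyService`
//! is a placeholder for a type implementing `ServiceFactory` and `DefaultInstance`), a
//! single-node development network can be assembled with [`NodeBuilder`](struct.NodeBuilder.html):
//!
//! ```ignore
//! use exonum_cli::NodeBuilder;
//!
//! fn main() -> Result<(), failure::Error> {
//!     NodeBuilder::development_node()?
//!         .with_default_rust_service(MyService)
//!         .run()
//! }
//! ```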
#![deny(missing_docs)]
pub use crate::config_manager::DefaultConfigManager;
pub use structopt;
use exonum::{
blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
merkledb::RocksDB,
runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use tempfile::TempDir;
use std::{env, ffi::OsString, iter, path::PathBuf};
use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};
pub mod command;
pub mod config;
pub mod io;
pub mod password;
mod config_manager;
/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
rust_runtime: RustRuntimeBuilder,
external_runtimes: Vec<RuntimeInstance>,
builtin_instances: Vec<InstanceInitParams>,
args: Option<Vec<OsString>>,
temp_dir: Option<TempDir>,
}
impl Default for NodeBuilder {
fn default() -> Self {
Self::new()
}
}
impl NodeBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {
rust_runtime: RustRuntimeBuilder::new()
.with_factory(Supervisor)
.with_factory(ExplorerFactory),
external_runtimes: vec![],
builtin_instances: vec![],
args: None,
temp_dir: None,
}
}
/// Creates a new builder with the provided command-line arguments. The path
/// to the current executable **does not** need to be specified as the first argument.
#[doc(hidden)] // unstable
pub fn with_args<I>(args: I) -> Self
where
I: IntoIterator,
I::Item: Into<OsString>,
{
let mut this = Self::new();
let executable = env::current_exe()
.map(PathBuf::into_os_string)
.unwrap_or_else(|_| "node".into());
let all_args = iter::once(executable)
.chain(args.into_iter().map(Into::into))
.collect();
this.args = Some(all_args);
this
}
/// Creates a single-node development network with default settings. The node stores
/// its data in a temporary directory, which is automatically removed when the node is stopped.
///
/// # Return value
///
/// Returns an error if the temporary directory cannot be created.
pub fn development_node() -> Result<Self, failure::Error> {
let temp_dir = TempDir::new()?;
let mut this = Self::with_args(vec![
OsString::from("run-dev"),
OsString::from("--artifacts-dir"),
temp_dir.path().into(),
]);
this.temp_dir = Some(temp_dir);
Ok(this)
}
/// Adds a new Rust service to the list of available services.
pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self {
self.rust_runtime = self.rust_runtime.with_factory(service);
self
}
/// Adds a new `Runtime` to the list of available runtimes.
///
/// Note that you don't have to add the Rust runtime, since it is included by default.
pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self {
self.external_runtimes.push(runtime.into());
self
}
/// Adds a service instance that will be available immediately after creating a genesis block.
///
/// For Rust services, the service factory needs to be separately supplied
/// via [`with_rust_service`](#method.with_rust_service).
pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self {
self.builtin_instances.push(instance.into());
self
}
/// Adds a default Rust service instance that will be available immediately after creating a
/// genesis block.
pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self {
self.with_instance(service.default_instance())
.with_rust_service(service)
}
/// Executes a command received from the command line.
///
/// # Return value
///
/// Returns:
///
/// - `Ok(Some(_))` if the command led to node creation
/// - `Ok(None)` if the command executed successfully and did not lead to node creation
/// - `Err(_)` if an error occurred during command execution
#[doc(hidden)] // unstable
pub fn execute_command(self) -> Result<Option<Node>, failure::Error> {
let command = if let Some(args) = self.args {
Command::from_iter(args)
} else {
Command::from_args()
};
if let StandardResult::Run(run_config) = command.execute()? {
let genesis_config = Self::genesis_config(&run_config, self.builtin_instances);
let db_options = &run_config.node_config.private_config.database;
let database = RocksDB::open(run_config.db_path, db_options)?;
let node_config_path = run_config.node_config_path.to_string_lossy();
let config_manager = DefaultConfigManager::new(node_config_path.into_owned());
let rust_runtime = self.rust_runtime;
let node_config = run_config.node_config.into();
let node_keys = run_config.node_keys;
let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys)
.with_genesis_config(genesis_config)
.with_config_manager(config_manager)
.with_plugin(SystemApiPlugin)
.with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender()));
for runtime in self.external_runtimes {
node_builder = node_builder.with_runtime(runtime);
}
Ok(Some(node_builder.build()))
} else {
Ok(None)
}
}
/// Configures the node using parameters provided by user from stdin and then runs it.
pub fn run(mut self) -> Result<(), failure::Error> {
// Store temporary directory until the node is done.
let _temp_dir = self.temp_dir.take();
if let Some(node) = self.execute_command()? {
node.run()
} else {
Ok(())
}
}
fn genesis_config(
run_config: &NodeRunConfig,
default_instances: Vec<InstanceInitParams>,
) -> GenesisConfig {
let mut builder = GenesisConfigBuilder::with_consensus_config(
run_config.node_config.public_config.consensus.clone(),
);
// Add builtin services to genesis config.
builder = builder
.with_artifact(Supervisor.artifact_id())
.with_instance(Self::supervisor_service(&run_config))
.with_artifact(ExplorerFactory.artifact_id())
.with_instance(ExplorerFactory.default_instance());
// Add default instances.
for instance in default_instances {
builder = builder
.with_artifact(instance.instance_spec.artifact.clone())
.with_instance(instance)
}
builder.build()
}
fn supervisor_service
main.py |
print("To-Do:")
for item_index in my_list: # Each element here is a ListItem object,
# not an index, despite the variable name
if item_index.visible and not show_hidden: # Only print visible items
# if show hidden is false
print(item_index.priority, item_index.text, sep='.\t')
elif show_hidden: # Print everything if show_hidden is true
if item_index.visible:
print(item_index.priority, item_index.text, sep='.\t')
else:
print("{0}.~\t{1}".format(item_index.priority, item_index.text)
)
# Indicate hidden items
# Printing the item priority with a dot, then the item, with a tab
# separating them
if to_save:
save_list(my_list, save_file_location)
return
def divider(size=100): # Draws a dividing line to go between sections
# (default 100 characters long)
"""The purpose of this function is to print a dashed line across the
screen with a specified length.
:param size: how many characters long the line should be, default is 100
:returns nothing"""
for i in range(size):
print('-', end='') # Prints out a single dash, no newline afterwards
# (the end= sets the last character to blank
print('') # Print out a newline (using the default ending of a print
# statement being a newline
return
def clean_input(prompt='Error'): # A special input function that will reject a
# user's input of text when a number is requested -- if no prompt is
# specified in the program, it will display "Error"
"""The purpose of this function is to prompt the user for a numerical
input and only accept a numerical input, rejects no input and text input.
:param prompt: the prompt the user sees, default is Error
:returns the user input as a float"""
text = True
phrase = '0'
while text:
phrase = input(prompt + '\n')
try: # Adapted from an example in the ThinkPython textbook (15.7) -
# Checks whether the input is a number, positive or negative. If
# not, rejects the input and user gets to try again
float(phrase)
text = False
except ValueError:
print("Error: Non-Numeric Entry Detected")
# if phrase.isnumeric(): # Checks for a positive number (negative
# rejected as well as text) - replaced with superior form from textbook
# example
# return float(phrase) # Return the number the user entered
# else:
# print("Error: Non-Numeric Entry Detected")
return float(phrase) # Return the number the user entered
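# Illustrative usage (not part of the original program): calling
# clean_input("Please enter the priority of this item") and typing "abc" prints
# "Error: Non-Numeric Entry Detected" and re-prompts; typing "-2.5" returns -2.5 as a float.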
def load_from_file(save_location): # This is a function for readability -
# opens txt file in read mode and loads it
"""The purpose of this function is to open the .txt save file and read
the contents into memory in the form of a list of custom ListItem
objects.
:param save_location: the location the save file is stored in
:returns a list of ListItem objects that is populated with the data from
the save file"""
# into an array (list) of ListItem variables
data_file_r = open(save_location, "r") # Open txt file in read mode
list_item = ["Text", -1, 2, True] # Item, Item Priority, group, is visible
todo = [] # make a list of lists
temp = 1 # Temporary counter variable to reconstruct lists from .txt file
line_counter = 1
try:
for item in data_file_r: # loop through each line in the file, one at
# a time - from w3schools.com
if (line_counter - 1) % 5 != 0 and line_counter > 0:
cleaned_item = ""
for character_index in range(len(
item)): # Loop through each character in the extracted
# string
if character_index != len(
item) - 1: # if it is not the last character, add
# it to the cleaned string
cleaned_item += item[character_index]
# Add every character to the cleaned string
# except the trailing "\n"
if temp == 1: # Item Text
list_item[0] = cleaned_item
temp = 2
elif temp == 2: # Item Priority
list_item[1] = int(cleaned_item)
temp = 3
elif temp == 3: # Item Group
list_item[2] = int(cleaned_item)
temp = 4
elif temp == 4: # Is Visible
if cleaned_item == "False":
list_item[3] = False
else: # Assume the item is visible if the text is not
# False
list_item[3] = True
todo.insert(0, ListItem(list_item[0], list_item[1],
list_item[2], list_item[3]))
temp = 1
else: # If some error occurred and a condition outside of the
# possible four is met, restart
temp = 1
line_counter += 1
except ValueError:
print("An error has occurred trying to load the file")
result = int(clean_input(
"Please enter a 2 to overwrite the current save file and start "
"over or any other number to exit the program"))
if result == 2:
key = random.randint(2, 9) # Generate a random integer between 2
# and 9 to be used as a second dynamic check
if key == 2:
key = 1 # If the random number is 2, set it to one so that
# the same number (2) cannot be used as the verification number
result2 = int(clean_input("Are you sure you want to delete all "
"of your saved data\nEnter {0} to "
"proceed, or anything else to "
"cancel".format(str(key))))
if result2 == key:
data_file_w = open(save_location, "w") # truncate the save file to erase it
data_file_w.close()
todo = []
print("Save Data Erased")
return todo # Return an empty list if file load failed
else:
print("Program Exiting")
quit(1)
else:
print("Program Exiting")
quit(1) # Exit the program with the exit code of 1
data_file_r.close()
# The list handling above is adapted from examples on w3schools.com. What is
# happening: the file is opened, a list is initialized to hold all
# four pieces of data, and after the data is pulled from the file and
# stored in the list, it is copied (not referenced) into the main list
# of ListItem objects
return todo
def save_list(todo_list, save_location):
"""The purpose of this function is to save a list of ListItem objects to a
specified location in a .txt file with the first line of the document
being an explanation of the file format being used.
:param todo_list: the list of ListItem objects to save to the save file
:param save_location: the location to create or overwrite the save file
:returns nothing"""
data_file_w = open(save_location,
"w") # open the save file and clear the data from it
data_file_w.write("Warning: The Todo-List Program will not be able to "
"load this save file if it is incorrectly modified. "
"Modify at your own risk. The structure is Entry "
"Text, Entry Priority as a number, Entry Group as a "
"number (Not Yet Utilized, but necessary), and Entry "
"Visibility as a boolean, each on a separate line, a "
"single line gap in between, and the "
"very first line is skipped\n")
for item in todo_list:
data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text,
str(item.priority),
str(item.group),
str(item.visible)))
data_file_w.close()
return
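# Illustrative save-file layout produced by save_list (entry values are made up):
# after the warning line, each entry occupies four lines -- text, priority, group,
# visibility -- followed by a blank line, e.g.
#
# Buy milk
# 2
# 0
# True
#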
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input("Please enter the name of the new item\n")
priority = check_priority_overlap(
int(clean_input("Please enter the priority of this item")), todo_list)
# group = int(clean_input("Please enter the group number of this item"))
group = 0 # Set the group value to zero, group system NYI
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible)) # Join
# the inputs to be added to the overall list
return
def select_item(todo_list, prompt='Error'):
main.py | :param size: how many characters long the line should be, default is 100
:returns nothing"""
for i in range(size):
print('-', end='') # Prints out a single dash, no newline afterwards
# (the end= sets the last character to blank
print('') # Print out a newline (using the default ending of a print
# statement being a newline
return
def clean_input(prompt='Error'): # A special input function that will reject a
# user's input of text when a number is requested -- if no prompt is
# specified in the program, it will display "Error"
"""The purpose of this function is to prompt the user for a numerical
input and only accept a numerical input, rejects no input and text input.
:param prompt: the prompt the user sees, default is Error
:returns the user input as a float"""
text = True
phrase = '0'
while text:
phrase = input(prompt + '\n')
try: # Adapted from an example in the ThinkPython textbook (15.7) -
# Checks whether the input is a number, positive or negative. If
# not, rejects the input and user gets to try again
float(phrase)
text = False
except ValueError:
print("Error: Non-Numeric Entry Detected")
# if phrase.isnumeric(): # Checks for a positive number (negative
# rejected as well as text) - replaced with superior form from textbook
# example
# return float(phrase) # Return the number the user entered
# else:
# print("Error: Non-Numeric Entry Detected")
return float(phrase) # Return the number the user entered
def load_from_file(save_location): # This is a function for readability -
# opens txt file in read mode and loads it
"""The purpose of this function is to open the .txt save file and read
the contents into memory in the form of a list of custom ListItem
objects.
:param save_location: the location the save file is stored in
:returns a list of ListItem objects that is populated with the data from
the save file"""
# into an array (list) of ListItem variables
data_file_r = open(save_location, "r") # Open txt file in read mode
list_item = ["Text", -1, 2, True] # Item, Item Priority, group, is visible
todo = [] # make a list of lists
temp = 1 # Temporary counter variable to reconstruct lists from .txt file
line_counter = 1
try:
for item in data_file_r: # loop through each line in the file, one at
# a time - from w3schools.com
if (line_counter - 1) % 5 != 0 and line_counter > 0:
cleaned_item = ""
for character_index in range(len(
item)): # Loop through each character in the extracted
# string
if character_index != len(
item) - 1: # if it is not the last character, add
# it to the cleaned string
cleaned_item += item[character_index]
# Add every character to the cleaned string
# except the trailing "\n"
if temp == 1: # Item Text
list_item[0] = cleaned_item
temp = 2
elif temp == 2: # Item Priority
list_item[1] = int(cleaned_item)
temp = 3
elif temp == 3: # Item Group
list_item[2] = int(cleaned_item)
temp = 4
elif temp == 4: # Is Visible
if cleaned_item == "False":
list_item[3] = False
else: # Assume the item is visible if the text is not
# False
list_item[3] = True
todo.insert(0, ListItem(list_item[0], list_item[1],
list_item[2], list_item[3]))
temp = 1
else: # If some error occurred and a condition outside of the
# possible four is met, restart
temp = 1
line_counter += 1
except ValueError:
print("An error has occurred trying to load the file")
result = int(clean_input(
"Please enter a 2 to overwrite the current save file and start "
"over or any other number to exit the program"))
if result == 2:
key = random.randint(2, 9) # Generate a random integer between 2
# and 9 to be used as a second dynamic check
if key == 2:
key = 1 # If the random number is 2, set it to one so that
# the same number (2) cannot be used as the verification number
result2 = int(clean_input("Are you sure you want to delete all "
"of your saved data\nEnter {0} to "
"proceed, or anything else to "
"cancel".format(str(key))))
if result2 == key:
data_file_w = open(save_location, "w") # truncate the save file to erase it
data_file_w.close()
todo = []
print("Save Data Erased")
return todo # Return an empty list if file load failed
else:
print("Program Exiting")
quit(1)
else:
print("Program Exiting")
quit(1) # Exit the program with the exit code of 1
data_file_r.close()
# All the list functions above referenced from w3schools.com What is
# happening above: Opening the file, initializing a list to hold all
# four pieces of data, then after pulling the data from the file and
# storing in the list, it is copied (not referenced) into my main list
# of ListItem objects
return todo
def save_list(todo_list, save_location):
"""The purpose of this function is to save a list of ListItem objects to a
specified location in a .txt file with the first line of the document
being an explanation of the file format being used.
:param todo_list: the list of ListItem objects to save to the save file
:param save_location: the location to create or overwrite the save file
:returns nothing"""
data_file_w = open(save_location,
"w") # open the save file and clear the data from it
data_file_w.write("Warning: The Todo-List Program will not be able to "
"load this save file if it is incorrectly modified. "
"Modify at your own risk. The structure is Entry "
"Text, Entry Priority as a number, Entry Group as a "
"number (Not Yet Utilized, but necessary), and Entry "
"Visibility as a boolean, each on a separate line, a "
"single line gap in between, and the "
"very first line is skipped\n")
for item in todo_list:
data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text,
str(item.priority),
str(item.group),
str(item.visible)))
data_file_w.close()
return
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input("Please enter the name of the new item\n")
priority = check_priority_overlap(
int(clean_input("Please enter the priority of this item")), todo_list)
# group = int(clean_input("Please enter the group number of this item"))
group = 0 # Set the group value to zero, group system NYI
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible)) # Join
# the inputs to be added to the overall list
return
def select_item(todo_list, prompt='Error'): # Ask the user
# which item from the list is to be modified
| """The purpose of this function is to display a list of all items in the
todo list and number each individually to allow the user to select an
item to modify or delete. The available numbers may
skip some if some items are hidden
:param todo_list: the list of ListItem objects to display
:param prompt: the prompt to display to the user, default is Error
:returns the user selected item's index in a computer friendly form (
starting at 0 instead of 1)"""
valid = False
index = 0
while not valid:
counter = 1 # counter for index printing
for item in todo_list: # The range needs to be the length of the list
# being printed
if item.visible:
print(counter, item.text, sep='\t')
else:
print(counter, "~ {0} ~".format(item.text), sep='\t')
counter += 1
# Printing the item number, then the item, with a tab separating
|
main.py | temp = 1
line_counter += 1
except ValueError:
print("An error has occurred trying to load the file")
result = int(clean_input(
"Please enter a 2 to overwrite the current save file and start "
"over or any other number to exit the program"))
if result == 2:
key = random.randint(2, 9) # Generate a random integer between 2
# and 9 to be used as a second dynamic check
if key == 2:
key = 1 # If the random number is 2, set it to one so that
# the same number (2) cannot be used as the verification number
result2 = int(clean_input("Are you sure you want to delete all "
"of your saved data\nEnter {0} to "
"proceed, or anything else to "
"cancel".format(str(key))))
if result2 == key:
data_file_w = open(save_location, "w") # truncate the save file to erase it
data_file_w.close()
todo = []
print("Save Data Erased")
return todo # Return an empty list if file load failed
else:
print("Program Exiting")
quit(1)
else:
print("Program Exiting")
quit(1) # Exit the program with the exit code of 1
data_file_r.close()
# All the list functions above referenced from w3schools.com What is
# happening above: Opening the file, initializing a list to hold all
# four pieces of data, then after pulling the data from the file and
# storing in the list, it is copied (not referenced) into my main list
# of ListItem objects
return todo
def save_list(todo_list, save_location):
"""The purpose of this function is to save a list of ListItem objects to a
specified location in a .txt file with the first line of the document
being an explanation of the file format being used.
:param todo_list: the list of ListItem objects to save to the save file
:param save_location: the location to create or overwrite the save file
:returns nothing"""
data_file_w = open(save_location,
"w") # open the save file and clear the data from it
data_file_w.write("Warning: The Todo-List Program will not be able to "
"load this save file if it is incorrectly modified. "
"Modify at your own risk. The structure is Entry "
"Text, Entry Priority as a number, Entry Group as a "
"number (Not Yet Utilized, but necessary), and Entry "
"Visibility as a boolean, each on a separate line, a "
"single line gap in between, and the "
"very first line is skipped\n")
for item in todo_list:
data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text,
str(item.priority),
str(item.group),
str(item.visible)))
data_file_w.close()
return
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input("Please enter the name of the new item\n")
priority = check_priority_overlap(
int(clean_input("Please enter the priority of this item")), todo_list)
# group = int(clean_input("Please enter the group number of this item"))
group = 0 # Set the group value to zero, group system NYI
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible)) # Join
# the inputs to be added to the overall list
return
def select_item(todo_list, prompt='Error'): # Ask the user
# which item from the list is to be modified
"""The purpose of this function is to display a list of all items in the
todo list and number each individually to allow the user to select an
item to modify or delete. The available numbers may
skip some if some items are hidden
:param todo_list: the list of ListItem objects to display
:param prompt: the prompt to display to the user, default is Error
:returns the user selected item's index in a computer friendly form (
starting at 0 instead of 1)"""
valid = False
index = 0
while not valid:
counter = 1 # counter for index printing
for item in todo_list: # The range needs to be the length of the list
# being printed
if item.visible:
print(counter, item.text, sep='\t')
else:
print(counter, "~ {0} ~".format(item.text), sep='\t')
counter += 1
# Printing the item number, then the item, with a tab separating
# them
index = int(clean_input(prompt))
if index < counter:
valid = True
else:
print("Invalid Input: Number is too big")
return index - 1
def remove_item(todo_list):
"""The purpose of this function is to delete a ListItem object from a
list of ListItem objects by prompting the user for the index and
verifying they want to delete the item.
:param todo_list: the list of ListItem objects from which to remove
one object
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"remove\nEnter a negative number or zero "
"to cancel")
if item >= 0: # 0, not 1 because the index returned is shifted to be
# computer friendly
todo_list.pop(item)
return
def mark_complete(todo_list):
"""The purpose of this function is to mark a selectedListItem object as
hidden and not to be printed unless specified, apart from selecting items.
:param todo_list: the list of ListItem objects to modify
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"Mark Completed and hide from the "
"list\nEnter a negative number or zero to "
"cancel")
if item >= 0:
todo_list[item].visible = False
return
def edit_item(todo_list):
"""The purpose of this function is to edit a ListItem object in the
list of ListItem objects, changing either the name or priority
:param todo_list: the list of ListItem objects that gets one object
modified
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"edit\nEnter a negative number or zero to "
"cancel")
if item >= 0:
while True:
value = clean_input("Which value would you like to edit? Enter:\n1"
" for the Item Text (Currently: {0})\n2 for "
"the Item Priority (Currently: {1})\n3 to "
"Cancel and Exit".format(todo_list[item].text,
str(todo_list[item].
priority)))
if value == 1: # Item Text Change
print("The Current Text is: {0}".format(todo_list[item].text))
todo_list[item].text = input("New Text:\n")
elif value == 2: # Item Priority Change
print("The Current Priority is: {0}".format(str(todo_list[item]
.priority)))
todo_list[item].priority = check_priority_overlap(
int(clean_input("New Priority:")), todo_list)
# elif value == 3: # Item Group Change
# print(f"The Current Group is: {todo_list[item].group}")
# todo_list[item].group = int(clean_input("New Group Number:"))
elif value == 3: # Exit Changing Menu
break
else:
print("Invalid Input - Please Try Again")
return
def check_list_status(todo_list): # Checks if the list is completely hidden
# (2), completely empty (1), or neither (0)
"""The purpose of this function is to check whether there are visible
items in the list, the entire list is hidden, or the list contains no
more ListItem objects
:param todo_list: the list of ListItem objects to check
:returns which condition using integer codes"""
if len(todo_list) == 0:
state = 1 # Empty List
else:
state = 2 # Entirely Hidden List
for item_index in range(len(todo_list)):
if todo_list[item_index].visible: # If an item is visible, then
# they are not all hidden
state = 0 # Neither
return state
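# Illustrative behaviour (sketch): check_list_status([]) returns 1; a list in which every
# ListItem has visible == False returns 2; any list with at least one visible item returns 0.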
def menu_loop(todo_list, save_file_location):
"""The purpose of this function is to repeatedly display the todo list
and user prompts menu until the program is closed
:param todo_list: the list of ListItem objects to display or modify
:param save_file_location: where the .txt save file is located for saving
:returns nothing"""
show_hidden = False
selection = 0
main.py | else: # Assume the item is visible if the text is not
# False
list_item[3] = True
todo.insert(0, ListItem(list_item[0], list_item[1],
list_item[2], list_item[3]))
temp = 1
else: # If some error occurred and a condition outside of the
# possible four is met, restart
temp = 1
line_counter += 1
except ValueError:
print("An error has occurred trying to load the file")
result = int(clean_input(
"Please enter a 2 to overwrite the current save file and start "
"over or any other number to exit the program"))
if result == 2:
key = random.randint(2, 9) # Generate a random integer between 2
# and 9 to be used as a second dynamic check
if key == 2:
key = 1 # If the random number is 2, set it to one so that
# the same number (2) cannot be used as the verification number
result2 = int(clean_input("Are you sure you want to delete all "
"of your saved data\nEnter {0} to "
"proceed, or anything else to "
"cancel".format(str(key))))
if result2 == key:
data_file_w = open(save_location, "w") # truncate the save file to erase it
data_file_w.close()
todo = []
print("Save Data Erased")
return todo # Return an empty list if file load failed
else:
print("Program Exiting")
quit(1)
else:
print("Program Exiting")
quit(1) # Exit the program with the exit code of 1
data_file_r.close()
# All the list functions above referenced from w3schools.com What is
# happening above: Opening the file, initializing a list to hold all
# four pieces of data, then after pulling the data from the file and
# storing in the list, it is copied (not referenced) into my main list
# of ListItem objects
return todo
def save_list(todo_list, save_location):
"""The purpose of this function is to save a list of ListItem objects to a
specified location in a .txt file with the first line of the document
being an explanation of the file format being used.
:param todo_list: the list of ListItem objects to save to the save file
:param save_location: the location to create or overwrite the save file
:returns nothing"""
data_file_w = open(save_location,
"w") # open the save file and clear the data from it
data_file_w.write("Warning: The Todo-List Program will not be able to "
"load this save file if it is incorrectly modified. "
"Modify at your own risk. The structure is Entry "
"Text, Entry Priority as a number, Entry Group as a "
"number (Not Yet Utilized, but necessary), and Entry "
"Visibility as a boolean, each on a separate line, a "
"single line gap in between, and the "
"very first line is skipped\n")
for item in todo_list:
data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text,
str(item.priority),
str(item.group),
str(item.visible)))
data_file_w.close()
return
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input("Please enter the name of the new item\n")
priority = check_priority_overlap(
int(clean_input("Please enter the priority of this item")), todo_list)
# group = int(clean_input("Please enter the group number of this item"))
group = 0 # Set the group value to zero, group system NYI
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible)) # Join
# the inputs to be added to the overall list
return
def select_item(todo_list, prompt='Error'): # Ask the user
# which item from the list is to be modified
"""The purpose of this function is to display a list of all items in the
todo list and number each individually to allow the user to select an
item to modify or delete. The available numbers may
skip some if some items are hidden
:param todo_list: the list of ListItem objects to display
:param prompt: the prompt to display to the user, default is Error
:returns the user selected item's index in a computer friendly form (
starting at 0 instead of 1)"""
valid = False
index = 0
while not valid:
counter = 1 # counter for index printing
for item in todo_list: # The range needs to be the length of the list
# being printed
if item.visible:
print(counter, item.text, sep='\t')
else:
print(counter, "~ {0} ~".format(item.text), sep='\t')
counter += 1
# Printing the item number, then the item, with a tab separating
# them
index = int(clean_input(prompt))
if index < counter:
valid = True
else:
print("Invalid Input: Number is too big")
return index - 1
def remove_item(todo_list):
"""The purpose of this function is to delete a ListItem object from a
list of ListItem objects by prompting the user for the index and
verifying they want to delete the item.
:param todo_list: the list of ListItem objects from which to remove
one object
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"remove\nEnter a negative number or zero "
"to cancel")
if item >= 0: # 0, not 1 because the index returned is shifted to be
# computer friendly
todo_list.pop(item)
return
def mark_complete(todo_list):
"""The purpose of this function is to mark a selectedListItem object as
hidden and not to be printed unless specified, apart from selecting items.
:param todo_list: the list of ListItem objects to modify
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"Mark Completed and hide from the "
"list\nEnter a negative number or zero to "
"cancel")
if item >= 0:
todo_list[item].visible = False
return
def edit_item(todo_list):
"""The purpose of this function is to edit a ListItem object in the
list of ListItem objects, changing either the name or priority
:param todo_list: the list of ListItem objects that gets one object
modified
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"edit\nEnter a negative number or zero to "
"cancel")
if item >= 0:
while True:
value = clean_input("Which value would you like to edit? Enter:\n1"
" for the Item Text (Currently: {0})\n2 for "
"the Item Priority (Currently: {1})\n3 to "
"Cancel and Exit".format(todo_list[item].text,
str(todo_list[item].
priority)))
if value == 1: # Item Text Change
print("The Current Text is: {0}".format(todo_list[item].text))
todo_list[item].text = input("New Text:\n")
elif value == 2: # Item Priority Change
print("The Current Priority is: {0}".format(str(todo_list[item]
.priority)))
todo_list[item].priority = check_priority_overlap(
int(clean_input("New Priority:")), todo_list)
# elif value == 3: # Item Group Change
# print(f"The Current Group is: {todo_list[item].group}")
# todo_list[item].group = int(clean_input("New Group Number:"))
elif value == 3: # Exit Changing Menu
break
else:
print("Invalid Input - Please Try Again")
return
def check_list_status(todo_list): # Checks if the list is completely hidden
# (2), completely empty (1), or neither (0)
"""The purpose of this function is to check whether there are visible
items in the list, the entire list is hidden, or the list contains no
more ListItem objects
:param todo_list: the list of ListItem objects to check
:returns which condition using integer codes"""
if len(todo_list) == 0:
state = 1 # Empty List
else:
state = 2 # Entirely Hidden List
for item_index in range(len(todo_list)):
if todo_list[item_index].visible: # If an item is visible, then
# they are not all hidden
state = 0 # Neither
|
routes.py | def get_sensor_type_id(sensor_type_name):
"""Given a sensor type name, get the ID of the sensor type from the database."""
query = db.session.query(
TypeClass.id,
).filter(TypeClass.sensor_type == sensor_type_name)
sensor_id = db.session.execute(query).fetchone()
if isinstance(sensor_id, Iterable):
sensor_id = sensor_id[0]
return sensor_id
def get_table_by_sensor_type(sensor_type_id):
"""Return the SQLAlchemy table/subquery corresponding to a given sensor type ID."""
# Because of how global constants work in Flask, DATA_COLUMNS_BY_SENSOR_TYPE has
# functions that return the relevant table/subquery, rather than the
# tables/subqueries themselves. Hence the calls like `value()` and setting
# `value = lambda: None`
global DATA_TABLES_BY_SENSOR_TYPE
if sensor_type_id in DATA_TABLES_BY_SENSOR_TYPE:
return DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id]()
else:
sensor_type_name = get_sensor_type_name(sensor_type_id)
if sensor_type_name in DATA_TABLES_BY_SENSOR_TYPE:
value = DATA_TABLES_BY_SENSOR_TYPE[sensor_type_name]
else:
value = lambda: None
DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id] = value
return value()
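# Illustrative sketch of the lazy-caching pattern above (the mapping value shown here is
# an assumption, not necessarily the real table): the dictionary stores zero-argument
# callables, so the table/subquery is only constructed on first use and the cached entry
# is reused afterwards.
# DATA_TABLES_BY_SENSOR_TYPE["Aranet T&RH"] = lambda: ReadingsAranetTRHClass
# table = DATA_TABLES_BY_SENSOR_TYPE["Aranet T&RH"]()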
def get_columns_by_sensor_type(sensor_type_id):
"""Return the names of the data columns in the table corresponding to a given sensor
type ID.
By "data columns" we mean the ones that depend on the sensor type and hold the
actual data, e.g. temperature and humidity, but not timestamp. The return values are
dictionaries with two keys, "column_name" for the name by which the database knows
this column, and "ui_name" for nice human-readable name fit for a UI.
"""
global DATA_COLUMNS_BY_SENSOR_TYPE
if sensor_type_id in DATA_COLUMNS_BY_SENSOR_TYPE:
return DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id]
else:
sensor_type_name = get_sensor_type_name(sensor_type_id)
if sensor_type_name in DATA_COLUMNS_BY_SENSOR_TYPE:
value = DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_name]
else:
value = None
DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] = value
return value
def get_default_sensor_type():
"""Get the ID of the default sensor type."""
return get_sensor_type_id(DEFAULT_SENSOR_TYPE)
def is_valid_sensor_type(sensor_type_id):
"""Return True if we have the necessary metadata about the table and its columns
needed for fetching and plotting data for the given sensor type, otherwise False.
"""
return (
get_table_by_sensor_type(sensor_type_id) is not None
and get_columns_by_sensor_type(sensor_type_id) is not None
)
# # # DONE WITH GLOBAL CONSTANTS AND SENSOR TYPE METADATA, BEGIN MAIN CONTENT # # #
def resample(df, bins, dt_from, dt_to):
"""
Resamples (adds missing date/temperature bin combinations) to a dataframe.
Arguments:
df: dataframe with temperature assigned to bins
bins: temperature bins as a list
dt_from: date range from
dt_to: date range to
Returns:
bins_list: a list of temperature bins
df_list: a list of df corresponding to temperature bins
"""
bins_list = []
for i in range(len(bins) - 1):
bins_list.append("(%.1f, %.1f]" % (bins[i], bins[i + 1]))
date_min = min(df["date"].min(), dt_from)
date_max = max(df["date"].max(), dt_to)
for n in range(int((date_max - date_min).days) + 1):
day = date_min + timedelta(n)
for temp_range in bins_list:
if len(df[(df["date"] == day) & (df["temp_bin"] == temp_range)].index) == 0:
df2 = pd.DataFrame(
{"date": [day], "temp_bin": [temp_range], "temp_cnt": [0]}
)
df = df.append(df2)
df = df.sort_values(by=["date", "temp_bin"], ascending=True)
df.reset_index(inplace=True, drop=True)
df_list = []
for bin_range in bins_list:
df_bin = df[df["temp_bin"] == bin_range]
del df_bin["temp_bin"]
df_bin.reset_index(inplace=True, drop=True)
df_list.append(df_bin)
return bins_list, df_list
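# Illustrative usage sketch (bin edges and dataframe are hypothetical):
# bins = [0.0, 18.0, 21.0, 25.0, 30.0]
# bins_list, df_list = resample(df_binned, bins, dt_from, dt_to)
# # bins_list -> ['(0.0, 18.0]', '(18.0, 21.0]', '(21.0, 25.0]', '(25.0, 30.0]']
# # df_list[i] holds the per-date counts for bins_list[i], with zero-count rows filled in.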
def lights_energy_use(dt_from_, dt_to_):
"""
Energy use from Carpenter's place (with lights - called Clapham in the database)
Arguments:
dt_from_: date range from
dt_to_: date range to
Returns:
lights_results_df - a pandas dataframe with mean lights on values
"""
dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)
dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)
d_from = pd.to_datetime(dt_from_.date())
d_to = pd.to_datetime(dt_to_.date())
col_ec = "electricity_consumption"
sensor_device_id = "Clapham"
lights_on_cols = []
# getting energy data for the analysis
query = db.session.query(
ReadingsEnergyClass.timestamp,
ReadingsEnergyClass.electricity_consumption,
).filter(
and_(
SensorClass.device_id == sensor_device_id,
ReadingsEnergyClass.sensor_id == SensorClass.id,
ReadingsEnergyClass.timestamp >= dt_from,
ReadingsEnergyClass.timestamp <= dt_to,
)
)
df = pd.read_sql(query.statement, query.session.bind)
if df.empty:
return pd.DataFrame({"date": [], "mean_lights_on": []})
# Resetting index
df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# grouping data by date-hour
energy_hour = (
df.groupby(
by=[
df["timestamp"].map(
lambda x: pd.to_datetime(
"%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour),
format="%Y-%m-%d-%H",
)
),
]
)["electricity_consumption"]
.sum()
.reset_index()
)
# Sorting and reseting index
energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# energy dates. Energy date starts from 4pm each day and lasts for 24 hours
energy_hour.loc[
energy_hour["timestamp"].dt.hour < 15, "energy_date"
] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date)
energy_hour.loc[
energy_hour["timestamp"].dt.hour >= 15, "energy_date"
] = pd.to_datetime(energy_hour["timestamp"].dt.date)
# Classification of lights being on
# Lights ON 1: Lights turn on at 4pm and turn off at 9am, as scheduled.
energy_hour["lights_on_1"] = energy_hour["timestamp"].apply(
lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0
)
lights_on_cols.append("lights_on_1")
# Lights ON 2: Lights are calculated by estimating the lighting use as between
# the minima of two consecutive days. The lights are considered on when the
# energy use is above the day's first quartile of lighting of this difference.
# energy_hour['lights_on_2'] = 0
# lights_on_cols.append('lights_on_2')
# Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW
# (max load of the extraction fan)
energy_hour["lights_on_3"] = energy_hour[col_ec].apply(
lambda x: 1 if (x > 30.0) else 0
)
lights_on_cols.append("lights_on_3")
# Lights ON 4: Lights are assumed to turn on at the time of largest energy use
# increase in the day, and turn off at the time of largest energy decrease of
# the day.
# estimating energy difference
energy_hour["dE"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1)
energy_hour["dE"] = energy_hour["dE"].fillna(0.0)
# finding max increase and min decrease
energy_hour["dE_min"] = energy_hour.groupby("energy_date")["dE"].transform("min")
energy_hour["dE_max"] = energy_hour.groupby("energy_date")["dE"].transform("max")
energy_hour.loc[
np.isclose(energy_hour["dE_max"], energy_hour["dE"]), "lights_on_4"
] = 1
energy_hour.loc[
np.isclose(energy_hour["dE_min"], energy_hour["dE"]), "lights_on_4"
] = 0
def get_sensor_type_name(sensor_type_id):
"""Given a sensor type ID, get the name of the sensor type from the database."""
query = db.session.query(
TypeClass.sensor_type,
).filter(TypeClass.id == sensor_type_id)
sensor_name = db.session.execute(query).fetchone()
if isinstance(sensor_name, Iterable):
sensor_name = sensor_name[0]
return sensor_name
|
routes.py | dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)
dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)
d_from = pd.to_datetime(dt_from_.date())
d_to = pd.to_datetime(dt_to_.date())
col_ec = "electricity_consumption"
sensor_device_id = "Clapham"
lights_on_cols = []
# getting energy data for the analysis
query = db.session.query(
ReadingsEnergyClass.timestamp,
ReadingsEnergyClass.electricity_consumption,
).filter(
and_(
SensorClass.device_id == sensor_device_id,
ReadingsEnergyClass.sensor_id == SensorClass.id,
ReadingsEnergyClass.timestamp >= dt_from,
ReadingsEnergyClass.timestamp <= dt_to,
)
)
df = pd.read_sql(query.statement, query.session.bind)
if df.empty:
return pd.DataFrame({"date": [], "mean_lights_on": []})
# Resetting index
df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# grouping data by date-hour
energy_hour = (
df.groupby(
by=[
df["timestamp"].map(
lambda x: pd.to_datetime(
"%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour),
format="%Y-%m-%d-%H",
)
),
]
)["electricity_consumption"]
.sum()
.reset_index()
)
# Sorting and reseting index
energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# energy dates. Energy date starts from 4pm each day and lasts for 24 hours
energy_hour.loc[
energy_hour["timestamp"].dt.hour < 15, "energy_date"
] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date)
energy_hour.loc[
energy_hour["timestamp"].dt.hour >= 15, "energy_date"
] = pd.to_datetime(energy_hour["timestamp"].dt.date)
# Classification of lights being on
# Lights ON 1: Lights turn on at 4pm and turn off at 9am, as scheduled.
energy_hour["lights_on_1"] = energy_hour["timestamp"].apply(
lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0
)
lights_on_cols.append("lights_on_1")
# Lights ON 2: Lights are calculated by estimating the lighting use as between
# the minima of two consecutive days. The lights are considered on when the
# energy use is above the day's first quartile of lighting of this difference.
# energy_hour['lights_on_2'] = 0
# lights_on_cols.append('lights_on_2')
# Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW
# (max load of the extraction fan)
energy_hour["lights_on_3"] = energy_hour[col_ec].apply(
lambda x: 1 if (x > 30.0) else 0
)
lights_on_cols.append("lights_on_3")
# Lights ON 4: Lights are assumed to turn on at the time of largest energy use
# increase in the day, and turn off at the time of largest energy decrease of
# the day.
# estimating energy difference
energy_hour["dE"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1)
energy_hour["dE"] = energy_hour["dE"].fillna(0.0)
# finding max increase and min decrease
energy_hour["dE_min"] = energy_hour.groupby("energy_date")["dE"].transform("min")
energy_hour["dE_max"] = energy_hour.groupby("energy_date")["dE"].transform("max")
energy_hour.loc[
np.isclose(energy_hour["dE_max"], energy_hour["dE"]), "lights_on_4"
] = 1
energy_hour.loc[
np.isclose(energy_hour["dE_min"], energy_hour["dE"]), "lights_on_4"
] = 0
# repeat last?
prev_row_value = None
for df_index in energy_hour.index:
if df_index > 0:
if np.isnan(energy_hour.loc[df_index, "lights_on_4"]) and not np.isnan(
prev_row_value
):
energy_hour.loc[df_index, "lights_on_4"] = prev_row_value
prev_row_value = energy_hour.loc[df_index, "lights_on_4"]
lights_on_cols.append("lights_on_4")
# Lights ON 5: Lights are assumed on if the energy use is over 0.9
# times the days' energy use mean, and the energy demand is over 30 kW.
energy_hour["energy_date_mean"] = energy_hour.groupby("energy_date")[
col_ec
].transform("mean")
energy_hour["lights_on_5"] = np.where(
(energy_hour[col_ec] > 30.0)
& (energy_hour[col_ec] > 0.9 * energy_hour["energy_date_mean"]),
1,
0,
)
lights_on_cols.append("lights_on_5")
# getting the mean value of lights on per day
energy_date_df = energy_hour.loc[
(energy_hour["energy_date"] >= d_from) & (energy_hour["energy_date"] <= d_to)
]
energy_date_df = (
energy_date_df.groupby(by=["energy_date"])[lights_on_cols].sum().reset_index()
)
energy_date_df["mean_lights_on"] = energy_date_df[lights_on_cols].sum(axis=1) / len(
lights_on_cols
)
energy_date_df["date"] = energy_date_df["energy_date"].dt.strftime("%Y-%m-%d")
lights_results_df = energy_date_df[["date", "mean_lights_on"]]
return lights_results_df
def ventilation_energy_use(dt_from, dt_to):
"""
In our data this is called Carpenter’s Place. This reading only counts energy use for
the second extraction fan.
Arguments:
dt_from: date range from
dt_to: date range to
Returns:
ventilation_results_df - a pandas dataframe with ventilation analysis results
"""
sensor_device_id = "1a Carpenters Place"
# getting eneregy data for the analysis
query = db.session.query(
ReadingsEnergyClass.timestamp,
ReadingsEnergyClass.electricity_consumption,
).filter(
and_(
SensorClass.device_id == sensor_device_id,
ReadingsEnergyClass.sensor_id == SensorClass.id,
ReadingsEnergyClass.timestamp >= dt_from,
ReadingsEnergyClass.timestamp <= dt_to,
)
)
df = pd.read_sql(query.statement, query.session.bind)
if df.empty:
return pd.DataFrame({"timestamp": [], "ach": []})
# Reseting index
df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# grouping data by date-hour
energy_hour = (
df.groupby(
by=[
df["timestamp"].map(
lambda x: "%04d-%02d-%02d %02d:00"
% (x.year, x.month, x.day, x.hour)
),
]
)["electricity_consumption"]
.sum()
.reset_index()
)
# Sorting and reseting index
energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# Calculating air exchange per hour
energy_hour["ach"] = (
energy_hour["electricity_consumption"] / CONST_SFP * 3600.0 / (CONST_VTOT / 2.0)
)
ventilation_results_df = energy_hour[["timestamp", "ach"]]
return ventilation_results_df
def aranet_trh_analysis(dt_from, dt_to):
"""
Performs data analysis for Aranet Temperature+Relative Humidity sensors.
Arguments:
dt_from_: date range from
dt_to_: date range to
Returns:
sensor_names: a list of sensor names
sensor_temp_ranges: json data with temperate ranges
"""
logging.info(
"Calling aranet_trh_analysis with parameters %s %s"
% (
dt_from.strftime(CONST_TIMESTAMP_FORMAT),
dt_to.strftime(CONST_TIMESTAMP_FORMAT),
)
)
query = db.session.query(
ReadingsAranetTRHClass.timestamp,
ReadingsAranetTRHClass.sensor_id,
SensorClass.name,
ReadingsAranetTRHClass.temperature,
ReadingsAranetTRHClass.humidity,
).filter(
and_(
ReadingsAranetTRHClass.sensor_id == SensorClass.id,
ReadingsAranetTRHClass.timestamp >= dt_from,
ReadingsAranetTRHClass.timestamp <= dt_to,
)
)
df = pd.read_sql(query.statement, query.session.bind)
logging.info("Total number of records found: %d" % (len(df.index)))
return temperature_range_analysis(df, dt_from, dt_to)
def tempera | ture_range_analysis(temp_d | identifier_name |
|
routes.py | ensor_type_name]
else:
value = lambda: None
DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id] = value
return value()
def get_columns_by_sensor_type(sensor_type_id):
"""Return the names of the data columns in the table corresponding to a given sensor
type ID.
By "data columns" we mean the ones that depend on the sensor type and hold the
actual data, e.g. temperature and humidity, but not timestamp. The return values are
dictionaries with two keys, "column_name" for the name by which the database knows
this column, and "ui_name" for nice human-readable name fit for a UI.
"""
global DATA_COLUMNS_BY_SENSOR_TYPE
if sensor_type_id in DATA_COLUMNS_BY_SENSOR_TYPE:
return DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id]
else:
sensor_type_name = get_sensor_type_name(sensor_type_id)
if sensor_type_name in DATA_COLUMNS_BY_SENSOR_TYPE:
value = DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_name]
else:
value = None
DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] = value
return value
def get_default_sensor_type():
"""Get the ID of the default sensor type."""
return get_sensor_type_id(DEFAULT_SENSOR_TYPE)
def is_valid_sensor_type(sensor_type_id):
"""Return True if we have the necessary metadata about the table and its columns
needed for fetching and plotting data for the given sensor type, otherwise False.
"""
return (
get_table_by_sensor_type(sensor_type_id) is not None
and get_columns_by_sensor_type(sensor_type_id) is not None
)
# # # DONE WITH GLOBAL CONSTANTS AND SENSOR TYPE METADATA, BEGIN MAIN CONTENT # # #
def resample(df, bins, dt_from, dt_to):
"""
Resamples (adds missing date/temperature bin combinations) to a dataframe.
Arguments:
df: dataframe with temperature assign to bins
bins: temperature bins as a list
dt_from: date range from
dt_to: date range to
Returns:
bins_list: a list of temperature bins
df_list: a list of df corresponding to temperature bins
"""
bins_list = []
for i in range(len(bins) - 1):
bins_list.append("(%.1f, %.1f]" % (bins[i], bins[i + 1]))
date_min = min(df["date"].min(), dt_from)
date_max = max(df["date"].max(), dt_to)
for n in range(int((date_max - date_min).days) + 1):
day = date_min + timedelta(n)
for temp_range in bins_list:
if len(df[(df["date"] == day) & (df["temp_bin"] == temp_range)].index) == 0:
df2 = pd.DataFrame(
{"date": [day], "temp_bin": [temp_range], "temp_cnt": [0]}
)
df = df.append(df2)
df = df.sort_values(by=["date", "temp_bin"], ascending=True)
df.reset_index(inplace=True, drop=True)
df_list = []
for bin_range in bins_list:
df_bin = df[df["temp_bin"] == bin_range]
del df_bin["temp_bin"]
df_bin.reset_index(inplace=True, drop=True)
df_list.append(df_bin)
return bins_list, df_list
def lights_energy_use(dt_from_, dt_to_):
"""
Energy use from Carpenter's place (with lights - called Clapham in the database)
Arguments:
dt_from_: date range from
dt_to_: date range to
Returns:
lights_results_df - a pandas dataframe with mean lights on values
"""
dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)
dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)
d_from = pd.to_datetime(dt_from_.date())
d_to = pd.to_datetime(dt_to_.date())
col_ec = "electricity_consumption"
sensor_device_id = "Clapham"
lights_on_cols = []
# getting eneregy data for the analysis
query = db.session.query(
ReadingsEnergyClass.timestamp,
ReadingsEnergyClass.electricity_consumption,
).filter(
and_(
SensorClass.device_id == sensor_device_id,
ReadingsEnergyClass.sensor_id == SensorClass.id,
ReadingsEnergyClass.timestamp >= dt_from,
ReadingsEnergyClass.timestamp <= dt_to,
)
)
df = pd.read_sql(query.statement, query.session.bind)
if df.empty:
return pd.DataFrame({"date": [], "mean_lights_on": []})
# Reseting index
df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# grouping data by date-hour
energy_hour = (
df.groupby(
by=[
df["timestamp"].map(
lambda x: pd.to_datetime(
"%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour),
format="%Y-%m-%d-%H",
)
),
]
)["electricity_consumption"]
.sum()
.reset_index()
)
# Sorting and reseting index
energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# energy dates. Energy date starts from 4pm each day and lasts for 24 hours
energy_hour.loc[
energy_hour["timestamp"].dt.hour < 15, "energy_date"
] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date)
energy_hour.loc[
energy_hour["timestamp"].dt.hour >= 15, "energy_date"
] = pd.to_datetime(energy_hour["timestamp"].dt.date)
# Clasification of lights being on
# Lights ON 1: Lights turn on at 4pm and turn off at 9am, as scheduled.
energy_hour["lights_on_1"] = energy_hour["timestamp"].apply(
lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0
)
lights_on_cols.append("lights_on_1")
# Lights ON 2: Lights are calculated by estimating the lighting use as between
# the minima of two consecutive days. The lights are considered on when the
# energy use is above the day's first quartile of lighting of this difference.
# energy_hour['lights_on_2'] = 0
# lights_on_cols.append('lights_on_2')
# Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW
# (max load of the extraction fan)
energy_hour["lights_on_3"] = energy_hour[col_ec].apply(
lambda x: 1 if (x > 30.0) else 0
)
lights_on_cols.append("lights_on_3")
# Lights ON 4: Lights are assumed to turn on at the time of largest energy use
# increase in the day, and turn off at the time of largest energy decrease of
# the day.
# estimating energy difference
energy_hour["dE"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1)
energy_hour["dE"] = energy_hour["dE"].fillna(0.0)
# finding max increase and min decrease
energy_hour["dE_min"] = energy_hour.groupby("energy_date")["dE"].transform("min")
energy_hour["dE_max"] = energy_hour.groupby("energy_date")["dE"].transform("max")
energy_hour.loc[
np.isclose(energy_hour["dE_max"], energy_hour["dE"]), "lights_on_4"
] = 1
energy_hour.loc[
np.isclose(energy_hour["dE_min"], energy_hour["dE"]), "lights_on_4"
] = 0
# repeat last?
prev_row_value = None
for df_index in energy_hour.index:
if df_index > 0:
if np | prev_row_value = energy_hour.loc[df_index, "lights_on_4"]
lights_on_cols.append("lights_on_4")
# Lights ON 5: Lights are assumed on if the energy use is over 0.9
# times the days' energy use mean, and the energy demand is over 30 kW.
energy_hour["energy_date_mean"] = energy_hour.groupby("energy_date")[
col_ec
].transform("mean")
energy_hour["lights_on_5"] = np.where(
(energy_hour[col_ec] > 30.0)
& (energy_hour[col_ec] > 0.9 * energy_hour["energy_date_mean"]),
1,
0,
)
lights_on_cols.append("lights_on_5")
# getting the mean value of lights on per day
energy_date_df = energy_hour.loc[
(energy_hour["energy_date"] >= d_from) & (energy_hour["energy_date"] <= d_to)
]
energy_date_df = (
energy | .isnan(energy_hour.loc[df_index, "lights_on_4"]) and not np.isnan(
prev_row_value
):
energy_hour.loc[df_index, "lights_on_4"] = prev_row_value
| conditional_block |
routes.py | [sensor_type_name]
else:
value = lambda: None
DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id] = value
return value()
def get_columns_by_sensor_type(sensor_type_id):
"""Return the names of the data columns in the table corresponding to a given sensor
type ID.
By "data columns" we mean the ones that depend on the sensor type and hold the
actual data, e.g. temperature and humidity, but not timestamp. The return values are
dictionaries with two keys, "column_name" for the name by which the database knows
this column, and "ui_name" for nice human-readable name fit for a UI.
"""
global DATA_COLUMNS_BY_SENSOR_TYPE
if sensor_type_id in DATA_COLUMNS_BY_SENSOR_TYPE:
return DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id]
else:
sensor_type_name = get_sensor_type_name(sensor_type_id)
if sensor_type_name in DATA_COLUMNS_BY_SENSOR_TYPE:
value = DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_name]
else:
value = None
DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] = value
return value
def get_default_sensor_type():
"""Get the ID of the default sensor type."""
return get_sensor_type_id(DEFAULT_SENSOR_TYPE)
def is_valid_sensor_type(sensor_type_id):
"""Return True if we have the necessary metadata about the table and its columns
needed for fetching and plotting data for the given sensor type, otherwise False.
"""
return (
get_table_by_sensor_type(sensor_type_id) is not None
and get_columns_by_sensor_type(sensor_type_id) is not None
)
# # # DONE WITH GLOBAL CONSTANTS AND SENSOR TYPE METADATA, BEGIN MAIN CONTENT # # #
def resample(df, bins, dt_from, dt_to):
"""
Resamples (adds missing date/temperature bin combinations) to a dataframe.
Arguments:
df: dataframe with temperature assign to bins
bins: temperature bins as a list
dt_from: date range from
dt_to: date range to
Returns:
bins_list: a list of temperature bins
df_list: a list of df corresponding to temperature bins
"""
bins_list = []
for i in range(len(bins) - 1):
bins_list.append("(%.1f, %.1f]" % (bins[i], bins[i + 1]))
date_min = min(df["date"].min(), dt_from)
date_max = max(df["date"].max(), dt_to)
for n in range(int((date_max - date_min).days) + 1):
day = date_min + timedelta(n)
for temp_range in bins_list:
if len(df[(df["date"] == day) & (df["temp_bin"] == temp_range)].index) == 0:
df2 = pd.DataFrame(
{"date": [day], "temp_bin": [temp_range], "temp_cnt": [0]}
)
df = df.append(df2)
df = df.sort_values(by=["date", "temp_bin"], ascending=True)
df.reset_index(inplace=True, drop=True)
df_list = []
for bin_range in bins_list:
df_bin = df[df["temp_bin"] == bin_range]
del df_bin["temp_bin"]
df_bin.reset_index(inplace=True, drop=True)
df_list.append(df_bin)
return bins_list, df_list
def lights_energy_use(dt_from_, dt_to_):
"""
Energy use from Carpenter's place (with lights - called Clapham in the database)
Arguments:
dt_from_: date range from
dt_to_: date range to
Returns: | """
dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)
dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)
d_from = pd.to_datetime(dt_from_.date())
d_to = pd.to_datetime(dt_to_.date())
col_ec = "electricity_consumption"
sensor_device_id = "Clapham"
lights_on_cols = []
# getting eneregy data for the analysis
query = db.session.query(
ReadingsEnergyClass.timestamp,
ReadingsEnergyClass.electricity_consumption,
).filter(
and_(
SensorClass.device_id == sensor_device_id,
ReadingsEnergyClass.sensor_id == SensorClass.id,
ReadingsEnergyClass.timestamp >= dt_from,
ReadingsEnergyClass.timestamp <= dt_to,
)
)
df = pd.read_sql(query.statement, query.session.bind)
if df.empty:
return pd.DataFrame({"date": [], "mean_lights_on": []})
# Reseting index
df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# grouping data by date-hour
energy_hour = (
df.groupby(
by=[
df["timestamp"].map(
lambda x: pd.to_datetime(
"%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour),
format="%Y-%m-%d-%H",
)
),
]
)["electricity_consumption"]
.sum()
.reset_index()
)
# Sorting and reseting index
energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
# energy dates. Energy date starts from 4pm each day and lasts for 24 hours
energy_hour.loc[
energy_hour["timestamp"].dt.hour < 15, "energy_date"
] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date)
energy_hour.loc[
energy_hour["timestamp"].dt.hour >= 15, "energy_date"
] = pd.to_datetime(energy_hour["timestamp"].dt.date)
# Clasification of lights being on
# Lights ON 1: Lights turn on at 4pm and turn off at 9am, as scheduled.
energy_hour["lights_on_1"] = energy_hour["timestamp"].apply(
lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0
)
lights_on_cols.append("lights_on_1")
# Lights ON 2: Lights are calculated by estimating the lighting use as between
# the minima of two consecutive days. The lights are considered on when the
# energy use is above the day's first quartile of lighting of this difference.
# energy_hour['lights_on_2'] = 0
# lights_on_cols.append('lights_on_2')
# Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW
# (max load of the extraction fan)
energy_hour["lights_on_3"] = energy_hour[col_ec].apply(
lambda x: 1 if (x > 30.0) else 0
)
lights_on_cols.append("lights_on_3")
# Lights ON 4: Lights are assumed to turn on at the time of largest energy use
# increase in the day, and turn off at the time of largest energy decrease of
# the day.
# estimating energy difference
energy_hour["dE"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1)
energy_hour["dE"] = energy_hour["dE"].fillna(0.0)
# finding max increase and min decrease
energy_hour["dE_min"] = energy_hour.groupby("energy_date")["dE"].transform("min")
energy_hour["dE_max"] = energy_hour.groupby("energy_date")["dE"].transform("max")
energy_hour.loc[
np.isclose(energy_hour["dE_max"], energy_hour["dE"]), "lights_on_4"
] = 1
energy_hour.loc[
np.isclose(energy_hour["dE_min"], energy_hour["dE"]), "lights_on_4"
] = 0
# repeat last?
prev_row_value = None
for df_index in energy_hour.index:
if df_index > 0:
if np.isnan(energy_hour.loc[df_index, "lights_on_4"]) and not np.isnan(
prev_row_value
):
energy_hour.loc[df_index, "lights_on_4"] = prev_row_value
prev_row_value = energy_hour.loc[df_index, "lights_on_4"]
lights_on_cols.append("lights_on_4")
# Lights ON 5: Lights are assumed on if the energy use is over 0.9
# times the days' energy use mean, and the energy demand is over 30 kW.
energy_hour["energy_date_mean"] = energy_hour.groupby("energy_date")[
col_ec
].transform("mean")
energy_hour["lights_on_5"] = np.where(
(energy_hour[col_ec] > 30.0)
& (energy_hour[col_ec] > 0.9 * energy_hour["energy_date_mean"]),
1,
0,
)
lights_on_cols.append("lights_on_5")
# getting the mean value of lights on per day
energy_date_df = energy_hour.loc[
(energy_hour["energy_date"] >= d_from) & (energy_hour["energy_date"] <= d_to)
]
energy_date_df = (
energy_date | lights_results_df - a pandas dataframe with mean lights on values | random_line_split |
main.rs | ,
keys::Keys,
merkledb::{BinaryValue, Snapshot, TemporaryDB},
runtime::{
migrations::{InitMigrationError, MigrationScript},
oneshot::Receiver,
versioning::Version,
AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail,
InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime,
SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID,
},
};
use exonum_derive::ExecutionFail;
use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle};
use exonum_rust_runtime::{spec::Deploy, RustRuntime};
use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface};
use futures::TryFutureExt;
use std::{cell::Cell, collections::BTreeMap, thread, time::Duration};
/// Service instance with a counter.
#[derive(Debug, Default)]
struct SampleService {
counter: Cell<u64>,
_name: String,
}
/// Sample runtime.
#[derive(Debug, Default)]
struct SampleRuntime {
deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>,
started_services: BTreeMap<InstanceId, SampleService>,
}
// Define runtime specific errors.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[derive(ExecutionFail)]
#[execution_fail(kind = "runtime")]
enum SampleRuntimeError {
/// Incorrect information to call transaction.
IncorrectCallInfo = 1,
/// Incorrect transaction payload.
IncorrectPayload = 2,
}
impl SampleRuntime {
/// Create a new service instance with the given specification.
fn start_service(
&self,
artifact: &ArtifactId,
instance: &InstanceDescriptor,
) -> Result<SampleService, ExecutionError> {
// Invariants guaranteed by the core.
assert!(self.deployed_artifacts.contains_key(artifact));
assert!(!self.started_services.contains_key(&instance.id));
Ok(SampleService {
_name: instance.name.to_owned(),
..SampleService::default()
})
}
/// In the present simplest case, the artifact is added into the deployed artifacts table.
fn deploy_artifact(
&mut self,
artifact: ArtifactId,
spec: Vec<u8>,
) -> Result<(), ExecutionError> {
// Invariant guaranteed by the core
assert!(!self.deployed_artifacts.contains_key(&artifact));
println!("Deploying artifact: {}", &artifact);
self.deployed_artifacts.insert(artifact, spec);
Ok(())
}
}
impl Runtime for SampleRuntime {
fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver |
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool {
self.deployed_artifacts.contains_key(id)
}
/// Initiates adding a new service and sets the counter value for this.
fn initiate_adding_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
params: Vec<u8>,
) -> Result<(), ExecutionError> {
let service_instance = self.start_service(artifact, context.instance())?;
let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?;
service_instance.counter.set(new_value);
println!(
"Initializing service {}: {} with value {}",
artifact,
context.instance(),
new_value
);
Ok(())
}
fn initiate_resuming_service(
&self,
_context: ExecutionContext<'_>,
_artifact: &ArtifactId,
_parameters: Vec<u8>,
) -> Result<(), ExecutionError> {
unreachable!("We don't resume services in this example.")
}
/// Commits status for the `SampleService` instance with the specified ID.
fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) {
let spec = &state.spec;
match state.status {
Some(InstanceStatus::Active) => {
// Unwrap here is safe, since by invocation of this method
// `exonum` guarantees that `initiate_adding_service` was invoked
// before and it returned `Ok(..)`.
let instance = self
.start_service(&spec.artifact, &spec.as_descriptor())
.unwrap();
println!("Starting service {}: {:?}", spec, instance);
self.started_services.insert(spec.id, instance);
}
Some(InstanceStatus::Stopped) => {
let instance = self.started_services.remove(&spec.id);
println!("Stopping service {}: {:?}", spec, instance);
}
_ => {
// We aren't interested in other possible statuses.
}
}
}
fn migrate(
&self,
_new_artifact: &ArtifactId,
_data_version: &Version,
) -> Result<Option<MigrationScript>, InitMigrationError> {
Err(InitMigrationError::NotSupported)
}
fn execute(
&self,
context: ExecutionContext<'_>,
method_id: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError> {
let service = self
.started_services
.get(&context.instance().id)
.ok_or(SampleRuntimeError::IncorrectCallInfo)?;
println!(
"Executing method {}#{} of service {}",
context.interface_name(),
method_id,
context.instance().id
);
const SERVICE_INTERFACE: &str = "";
match (context.interface_name(), method_id) {
// Increment counter.
(SERVICE_INTERFACE, 0) => {
let value = u64::from_bytes(payload.into())
.map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?;
let counter = service.counter.get();
println!("Updating counter value to {}", counter + value);
service.counter.set(value + counter);
Ok(())
}
// Reset counter.
(SERVICE_INTERFACE, 1) => {
if !payload.is_empty() {
Err(SampleRuntimeError::IncorrectPayload.into())
} else {
println!("Resetting counter");
service.counter.set(0);
Ok(())
}
}
// Unknown transaction.
(interface, method) => {
let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!(
"Incorrect information to call transaction. {}#{}",
interface, method
));
Err(err)
}
}
}
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}
impl From<SampleRuntime> for (u32, Box<dyn Runtime>) {
fn from(inner: SampleRuntime) -> Self {
(SampleRuntime::ID, Box::new(inner))
}
}
impl WellKnownRuntime for SampleRuntime {
const ID: u32 = 255;
}
fn node_config() -> (NodeConfig, Keys) {
let keys = Keys::random();
let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())];
let consensus = ConsensusConfig::default().with_validator_keys(validator_keys);
let api_address = "0.0.0.0:8000".parse().unwrap();
let api_cfg = NodeApiConfig {
public_api_address: Some(api_address),
..Default::default()
};
let peer_address = "0.0.0.0:2000";
let node_config = NodeConfig {
listen_address: peer_address.parse().unwrap(),
consensus,
external_address: peer_address.to_owned(),
network: Default::default(),
connect_list: Default::default(),
api: api_cfg,
mempool: Default::default(),
thread_pool_size: Default::default(),
};
(node_config, keys)
}
async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) {
let service_keypair = blockchain.service_keypair();
let deploy_height = Height(50);
// Send an artifact `DeployRequest` to the sample runtime.
let artifact = "255:sample_artifact:0.1.0".parse().unwrap();
let request = DeployRequest::new(artifact, deploy_height);
let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
// Wait until the request is finished.
thread::sleep(Duration::from_secs(5));
// Send a `StartService` request to the sample runtime.
let instance_name = "instance";
let proposal = ConfigPropose::immediate(0).start_service(
"255:sample_artifact:0.1.0".parse().unwrap(),
instance_name,
10_u64,
);
let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal);
blockchain
.sender()
.broadcast_transaction(proposal)
.await
.unwrap();
// Wait until instance identifier is assigned.
thread::sleep(Duration::from_secs(1));
// Get an instance identifier.
let snapshot = blockchain.snapshot();
let state = snapshot
.for_dispatcher()
| {
Receiver::with_result(self.deploy_artifact(artifact, spec))
} | identifier_body |
main.rs | ,
keys::Keys,
merkledb::{BinaryValue, Snapshot, TemporaryDB},
runtime::{
migrations::{InitMigrationError, MigrationScript},
oneshot::Receiver,
versioning::Version,
AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail,
InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime,
SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID,
},
};
use exonum_derive::ExecutionFail;
use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle};
use exonum_rust_runtime::{spec::Deploy, RustRuntime};
use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface};
use futures::TryFutureExt;
use std::{cell::Cell, collections::BTreeMap, thread, time::Duration};
/// Service instance with a counter.
#[derive(Debug, Default)]
struct SampleService {
counter: Cell<u64>,
_name: String,
}
/// Sample runtime.
#[derive(Debug, Default)]
struct SampleRuntime {
deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>,
started_services: BTreeMap<InstanceId, SampleService>,
}
// Define runtime specific errors.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[derive(ExecutionFail)]
#[execution_fail(kind = "runtime")]
enum SampleRuntimeError {
/// Incorrect information to call transaction.
IncorrectCallInfo = 1,
/// Incorrect transaction payload.
IncorrectPayload = 2,
}
impl SampleRuntime {
/// Create a new service instance with the given specification.
fn start_service(
&self,
artifact: &ArtifactId,
instance: &InstanceDescriptor,
) -> Result<SampleService, ExecutionError> {
// Invariants guaranteed by the core.
assert!(self.deployed_artifacts.contains_key(artifact));
assert!(!self.started_services.contains_key(&instance.id));
Ok(SampleService {
_name: instance.name.to_owned(),
..SampleService::default()
})
}
/// In the present simplest case, the artifact is added into the deployed artifacts table.
fn deploy_artifact(
&mut self,
artifact: ArtifactId,
spec: Vec<u8>,
) -> Result<(), ExecutionError> {
// Invariant guaranteed by the core
assert!(!self.deployed_artifacts.contains_key(&artifact));
println!("Deploying artifact: {}", &artifact);
self.deployed_artifacts.insert(artifact, spec);
Ok(())
}
}
impl Runtime for SampleRuntime {
fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver {
Receiver::with_result(self.deploy_artifact(artifact, spec))
}
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool {
self.deployed_artifacts.contains_key(id)
}
/// Initiates adding a new service and sets the counter value for this.
fn initiate_adding_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
params: Vec<u8>,
) -> Result<(), ExecutionError> {
let service_instance = self.start_service(artifact, context.instance())?;
let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?;
service_instance.counter.set(new_value);
println!(
"Initializing service {}: {} with value {}",
artifact,
context.instance(),
new_value
);
Ok(())
}
fn initiate_resuming_service(
&self,
_context: ExecutionContext<'_>,
_artifact: &ArtifactId,
_parameters: Vec<u8>,
) -> Result<(), ExecutionError> {
unreachable!("We don't resume services in this example.")
}
/// Commits status for the `SampleService` instance with the specified ID.
fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) {
let spec = &state.spec;
match state.status {
Some(InstanceStatus::Active) => |
Some(InstanceStatus::Stopped) => {
let instance = self.started_services.remove(&spec.id);
println!("Stopping service {}: {:?}", spec, instance);
}
_ => {
// We aren't interested in other possible statuses.
}
}
}
fn migrate(
&self,
_new_artifact: &ArtifactId,
_data_version: &Version,
) -> Result<Option<MigrationScript>, InitMigrationError> {
Err(InitMigrationError::NotSupported)
}
fn execute(
&self,
context: ExecutionContext<'_>,
method_id: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError> {
let service = self
.started_services
.get(&context.instance().id)
.ok_or(SampleRuntimeError::IncorrectCallInfo)?;
println!(
"Executing method {}#{} of service {}",
context.interface_name(),
method_id,
context.instance().id
);
const SERVICE_INTERFACE: &str = "";
match (context.interface_name(), method_id) {
// Increment counter.
(SERVICE_INTERFACE, 0) => {
let value = u64::from_bytes(payload.into())
.map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?;
let counter = service.counter.get();
println!("Updating counter value to {}", counter + value);
service.counter.set(value + counter);
Ok(())
}
// Reset counter.
(SERVICE_INTERFACE, 1) => {
if !payload.is_empty() {
Err(SampleRuntimeError::IncorrectPayload.into())
} else {
println!("Resetting counter");
service.counter.set(0);
Ok(())
}
}
// Unknown transaction.
(interface, method) => {
let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!(
"Incorrect information to call transaction. {}#{}",
interface, method
));
Err(err)
}
}
}
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}
impl From<SampleRuntime> for (u32, Box<dyn Runtime>) {
fn from(inner: SampleRuntime) -> Self {
(SampleRuntime::ID, Box::new(inner))
}
}
impl WellKnownRuntime for SampleRuntime {
const ID: u32 = 255;
}
fn node_config() -> (NodeConfig, Keys) {
let keys = Keys::random();
let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())];
let consensus = ConsensusConfig::default().with_validator_keys(validator_keys);
let api_address = "0.0.0.0:8000".parse().unwrap();
let api_cfg = NodeApiConfig {
public_api_address: Some(api_address),
..Default::default()
};
let peer_address = "0.0.0.0:2000";
let node_config = NodeConfig {
listen_address: peer_address.parse().unwrap(),
consensus,
external_address: peer_address.to_owned(),
network: Default::default(),
connect_list: Default::default(),
api: api_cfg,
mempool: Default::default(),
thread_pool_size: Default::default(),
};
(node_config, keys)
}
async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) {
let service_keypair = blockchain.service_keypair();
let deploy_height = Height(50);
// Send an artifact `DeployRequest` to the sample runtime.
let artifact = "255:sample_artifact:0.1.0".parse().unwrap();
let request = DeployRequest::new(artifact, deploy_height);
let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
// Wait until the request is finished.
thread::sleep(Duration::from_secs(5));
// Send a `StartService` request to the sample runtime.
let instance_name = "instance";
let proposal = ConfigPropose::immediate(0).start_service(
"255:sample_artifact:0.1.0".parse().unwrap(),
instance_name,
10_u64,
);
let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal);
blockchain
.sender()
.broadcast_transaction(proposal)
.await
.unwrap();
// Wait until instance identifier is assigned.
thread::sleep(Duration::from_secs(1));
// Get an instance identifier.
let snapshot = blockchain.snapshot();
let state = snapshot
.for_dispatcher()
| {
// Unwrap here is safe, since by invocation of this method
// `exonum` guarantees that `initiate_adding_service` was invoked
// before and it returned `Ok(..)`.
let instance = self
.start_service(&spec.artifact, &spec.as_descriptor())
.unwrap();
println!("Starting service {}: {:?}", spec, instance);
self.started_services.insert(spec.id, instance);
} | conditional_block |
main.rs | Height,
keys::Keys,
merkledb::{BinaryValue, Snapshot, TemporaryDB},
runtime::{
migrations::{InitMigrationError, MigrationScript},
oneshot::Receiver,
versioning::Version,
AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail,
InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime,
SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID,
},
};
use exonum_derive::ExecutionFail;
use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle};
use exonum_rust_runtime::{spec::Deploy, RustRuntime};
use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface};
use futures::TryFutureExt;
use std::{cell::Cell, collections::BTreeMap, thread, time::Duration};
/// Service instance with a counter.
#[derive(Debug, Default)]
struct SampleService {
counter: Cell<u64>,
_name: String,
}
/// Sample runtime.
#[derive(Debug, Default)]
struct SampleRuntime {
deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>,
started_services: BTreeMap<InstanceId, SampleService>,
}
// Define runtime specific errors.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[derive(ExecutionFail)]
#[execution_fail(kind = "runtime")]
enum SampleRuntimeError {
/// Incorrect information to call transaction.
IncorrectCallInfo = 1,
/// Incorrect transaction payload.
IncorrectPayload = 2,
}
impl SampleRuntime {
/// Create a new service instance with the given specification.
fn start_service(
&self,
artifact: &ArtifactId,
instance: &InstanceDescriptor,
) -> Result<SampleService, ExecutionError> {
// Invariants guaranteed by the core.
assert!(self.deployed_artifacts.contains_key(artifact));
assert!(!self.started_services.contains_key(&instance.id));
Ok(SampleService { | })
}
/// In the present simplest case, the artifact is added into the deployed artifacts table.
fn deploy_artifact(
&mut self,
artifact: ArtifactId,
spec: Vec<u8>,
) -> Result<(), ExecutionError> {
// Invariant guaranteed by the core
assert!(!self.deployed_artifacts.contains_key(&artifact));
println!("Deploying artifact: {}", &artifact);
self.deployed_artifacts.insert(artifact, spec);
Ok(())
}
}
impl Runtime for SampleRuntime {
fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver {
Receiver::with_result(self.deploy_artifact(artifact, spec))
}
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool {
self.deployed_artifacts.contains_key(id)
}
/// Initiates adding a new service and sets the counter value for this.
fn initiate_adding_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
params: Vec<u8>,
) -> Result<(), ExecutionError> {
let service_instance = self.start_service(artifact, context.instance())?;
let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?;
service_instance.counter.set(new_value);
println!(
"Initializing service {}: {} with value {}",
artifact,
context.instance(),
new_value
);
Ok(())
}
fn initiate_resuming_service(
&self,
_context: ExecutionContext<'_>,
_artifact: &ArtifactId,
_parameters: Vec<u8>,
) -> Result<(), ExecutionError> {
unreachable!("We don't resume services in this example.")
}
/// Commits status for the `SampleService` instance with the specified ID.
fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) {
let spec = &state.spec;
match state.status {
Some(InstanceStatus::Active) => {
// Unwrap here is safe, since by invocation of this method
// `exonum` guarantees that `initiate_adding_service` was invoked
// before and it returned `Ok(..)`.
let instance = self
.start_service(&spec.artifact, &spec.as_descriptor())
.unwrap();
println!("Starting service {}: {:?}", spec, instance);
self.started_services.insert(spec.id, instance);
}
Some(InstanceStatus::Stopped) => {
let instance = self.started_services.remove(&spec.id);
println!("Stopping service {}: {:?}", spec, instance);
}
_ => {
// We aren't interested in other possible statuses.
}
}
}
fn migrate(
&self,
_new_artifact: &ArtifactId,
_data_version: &Version,
) -> Result<Option<MigrationScript>, InitMigrationError> {
Err(InitMigrationError::NotSupported)
}
fn execute(
&self,
context: ExecutionContext<'_>,
method_id: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError> {
let service = self
.started_services
.get(&context.instance().id)
.ok_or(SampleRuntimeError::IncorrectCallInfo)?;
println!(
"Executing method {}#{} of service {}",
context.interface_name(),
method_id,
context.instance().id
);
const SERVICE_INTERFACE: &str = "";
match (context.interface_name(), method_id) {
// Increment counter.
(SERVICE_INTERFACE, 0) => {
let value = u64::from_bytes(payload.into())
.map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?;
let counter = service.counter.get();
println!("Updating counter value to {}", counter + value);
service.counter.set(value + counter);
Ok(())
}
// Reset counter.
(SERVICE_INTERFACE, 1) => {
if !payload.is_empty() {
Err(SampleRuntimeError::IncorrectPayload.into())
} else {
println!("Resetting counter");
service.counter.set(0);
Ok(())
}
}
// Unknown transaction.
(interface, method) => {
let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!(
"Incorrect information to call transaction. {}#{}",
interface, method
));
Err(err)
}
}
}
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}
impl From<SampleRuntime> for (u32, Box<dyn Runtime>) {
fn from(inner: SampleRuntime) -> Self {
(SampleRuntime::ID, Box::new(inner))
}
}
impl WellKnownRuntime for SampleRuntime {
const ID: u32 = 255;
}
fn node_config() -> (NodeConfig, Keys) {
let keys = Keys::random();
let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())];
let consensus = ConsensusConfig::default().with_validator_keys(validator_keys);
let api_address = "0.0.0.0:8000".parse().unwrap();
let api_cfg = NodeApiConfig {
public_api_address: Some(api_address),
..Default::default()
};
let peer_address = "0.0.0.0:2000";
let node_config = NodeConfig {
listen_address: peer_address.parse().unwrap(),
consensus,
external_address: peer_address.to_owned(),
network: Default::default(),
connect_list: Default::default(),
api: api_cfg,
mempool: Default::default(),
thread_pool_size: Default::default(),
};
(node_config, keys)
}
async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) {
let service_keypair = blockchain.service_keypair();
let deploy_height = Height(50);
// Send an artifact `DeployRequest` to the sample runtime.
let artifact = "255:sample_artifact:0.1.0".parse().unwrap();
let request = DeployRequest::new(artifact, deploy_height);
let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
// Wait until the request is finished.
thread::sleep(Duration::from_secs(5));
// Send a `StartService` request to the sample runtime.
let instance_name = "instance";
let proposal = ConfigPropose::immediate(0).start_service(
"255:sample_artifact:0.1.0".parse().unwrap(),
instance_name,
10_u64,
);
let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal);
blockchain
.sender()
.broadcast_transaction(proposal)
.await
.unwrap();
// Wait until instance identifier is assigned.
thread::sleep(Duration::from_secs(1));
// Get an instance identifier.
let snapshot = blockchain.snapshot();
let state = snapshot
.for_dispatcher()
| _name: instance.name.to_owned(),
..SampleService::default() | random_line_split |
main.rs | ,
keys::Keys,
merkledb::{BinaryValue, Snapshot, TemporaryDB},
runtime::{
migrations::{InitMigrationError, MigrationScript},
oneshot::Receiver,
versioning::Version,
AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail,
InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime,
SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID,
},
};
use exonum_derive::ExecutionFail;
use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle};
use exonum_rust_runtime::{spec::Deploy, RustRuntime};
use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface};
use futures::TryFutureExt;
use std::{cell::Cell, collections::BTreeMap, thread, time::Duration};
/// Service instance with a counter.
#[derive(Debug, Default)]
struct SampleService {
counter: Cell<u64>,
_name: String,
}
/// Sample runtime.
#[derive(Debug, Default)]
struct SampleRuntime {
deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>,
started_services: BTreeMap<InstanceId, SampleService>,
}
// Define runtime specific errors.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[derive(ExecutionFail)]
#[execution_fail(kind = "runtime")]
enum SampleRuntimeError {
/// Incorrect information to call transaction.
IncorrectCallInfo = 1,
/// Incorrect transaction payload.
IncorrectPayload = 2,
}
impl SampleRuntime {
/// Create a new service instance with the given specification.
fn start_service(
&self,
artifact: &ArtifactId,
instance: &InstanceDescriptor,
) -> Result<SampleService, ExecutionError> {
// Invariants guaranteed by the core.
assert!(self.deployed_artifacts.contains_key(artifact));
assert!(!self.started_services.contains_key(&instance.id));
Ok(SampleService {
_name: instance.name.to_owned(),
..SampleService::default()
})
}
/// In the present simplest case, the artifact is added into the deployed artifacts table.
fn deploy_artifact(
&mut self,
artifact: ArtifactId,
spec: Vec<u8>,
) -> Result<(), ExecutionError> {
// Invariant guaranteed by the core
assert!(!self.deployed_artifacts.contains_key(&artifact));
println!("Deploying artifact: {}", &artifact);
self.deployed_artifacts.insert(artifact, spec);
Ok(())
}
}
impl Runtime for SampleRuntime {
fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver {
Receiver::with_result(self.deploy_artifact(artifact, spec))
}
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool {
self.deployed_artifacts.contains_key(id)
}
/// Initiates adding a new service and sets the counter value for this.
fn initiate_adding_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
params: Vec<u8>,
) -> Result<(), ExecutionError> {
let service_instance = self.start_service(artifact, context.instance())?;
let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?;
service_instance.counter.set(new_value);
println!(
"Initializing service {}: {} with value {}",
artifact,
context.instance(),
new_value
);
Ok(())
}
fn initiate_resuming_service(
&self,
_context: ExecutionContext<'_>,
_artifact: &ArtifactId,
_parameters: Vec<u8>,
) -> Result<(), ExecutionError> {
unreachable!("We don't resume services in this example.")
}
/// Commits status for the `SampleService` instance with the specified ID.
fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) {
let spec = &state.spec;
match state.status {
Some(InstanceStatus::Active) => {
// Unwrap here is safe, since by invocation of this method
// `exonum` guarantees that `initiate_adding_service` was invoked
// before and it returned `Ok(..)`.
let instance = self
.start_service(&spec.artifact, &spec.as_descriptor())
.unwrap();
println!("Starting service {}: {:?}", spec, instance);
self.started_services.insert(spec.id, instance);
}
Some(InstanceStatus::Stopped) => {
let instance = self.started_services.remove(&spec.id);
println!("Stopping service {}: {:?}", spec, instance);
}
_ => {
// We aren't interested in other possible statuses.
}
}
}
fn migrate(
&self,
_new_artifact: &ArtifactId,
_data_version: &Version,
) -> Result<Option<MigrationScript>, InitMigrationError> {
Err(InitMigrationError::NotSupported)
}
fn execute(
&self,
context: ExecutionContext<'_>,
method_id: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError> {
let service = self
.started_services
.get(&context.instance().id)
.ok_or(SampleRuntimeError::IncorrectCallInfo)?;
println!(
"Executing method {}#{} of service {}",
context.interface_name(),
method_id,
context.instance().id
);
const SERVICE_INTERFACE: &str = "";
match (context.interface_name(), method_id) {
// Increment counter.
(SERVICE_INTERFACE, 0) => {
let value = u64::from_bytes(payload.into())
.map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?;
let counter = service.counter.get();
println!("Updating counter value to {}", counter + value);
service.counter.set(value + counter);
Ok(())
}
// Reset counter.
(SERVICE_INTERFACE, 1) => {
if !payload.is_empty() {
Err(SampleRuntimeError::IncorrectPayload.into())
} else {
println!("Resetting counter");
service.counter.set(0);
Ok(())
}
}
// Unknown transaction.
(interface, method) => {
let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!(
"Incorrect information to call transaction. {}#{}",
interface, method
));
Err(err)
}
}
}
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn | (&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}
impl From<SampleRuntime> for (u32, Box<dyn Runtime>) {
fn from(inner: SampleRuntime) -> Self {
(SampleRuntime::ID, Box::new(inner))
}
}
impl WellKnownRuntime for SampleRuntime {
const ID: u32 = 255;
}
fn node_config() -> (NodeConfig, Keys) {
let keys = Keys::random();
let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())];
let consensus = ConsensusConfig::default().with_validator_keys(validator_keys);
let api_address = "0.0.0.0:8000".parse().unwrap();
let api_cfg = NodeApiConfig {
public_api_address: Some(api_address),
..Default::default()
};
let peer_address = "0.0.0.0:2000";
let node_config = NodeConfig {
listen_address: peer_address.parse().unwrap(),
consensus,
external_address: peer_address.to_owned(),
network: Default::default(),
connect_list: Default::default(),
api: api_cfg,
mempool: Default::default(),
thread_pool_size: Default::default(),
};
(node_config, keys)
}
async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) {
let service_keypair = blockchain.service_keypair();
let deploy_height = Height(50);
// Send an artifact `DeployRequest` to the sample runtime.
let artifact = "255:sample_artifact:0.1.0".parse().unwrap();
let request = DeployRequest::new(artifact, deploy_height);
let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
// Wait until the request is finished.
thread::sleep(Duration::from_secs(5));
// Send a `StartService` request to the sample runtime.
let instance_name = "instance";
let proposal = ConfigPropose::immediate(0).start_service(
"255:sample_artifact:0.1.0".parse().unwrap(),
instance_name,
10_u64,
);
let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal);
blockchain
.sender()
.broadcast_transaction(proposal)
.await
.unwrap();
// Wait until instance identifier is assigned.
thread::sleep(Duration::from_secs(1));
// Get an instance identifier.
let snapshot = blockchain.snapshot();
let state = snapshot
.for_dispatcher()
| after_commit | identifier_name |
index.js | n;
for(let input of tx.inputs)
capacityInputs += BigInt(input.cell_output.capacity);
for(let output of tx.outputs)
capacityOutputs += BigInt(output.cell_output.capacity);
if(capacityInputs - capacityOutputs > ckbytesToShannons(1))
throw new Error(`Transaction fee too high: ${formattedNumber(shannonsToCkbytes(capacityInputs - capacityOutputs))} CKBytes. A normal transaction fee is < 1 CKByte.`);
}
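// --- Worked example (added for illustration, not part of the original file): the fee
// --- checked above is simply total input capacity minus total output capacity. For
// --- instance, inputs of 1,000 CKBytes and outputs of 999.999 CKBytes leave a fee of
// --- 0.001 CKBytes (100,000 Shannons), which passes the "< 1 CKByte" sanity check.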
/**
* Collects Cells for use as capacity from the specified lock script.
*
 * This will search for Cells with at least capacityRequired. If there is insufficient capacity available, an error will be thrown.
*
* @example
* const {inputCells, inputCapacity} = await collectCapacity(indexer, addressToScript("ckt1qyqvsv5240xeh85wvnau2eky8pwrhh4jr8ts8vyj37"), ckbytesToShannons(100n));
*
* @param {Object} indexer An instance of a running Lumos Indexer.
 * @param {Object} lockScript A lock script used to query the CellCollector to find Cells to use as capacity.
* @param {BigInt} capacityRequired The number of CKBytes needed.
*
* @returns {Object} An object with the inputCells[] found and the inputCapacity contained within the provided Cells.
*/
async function collectCapacity(indexer, lockScript, capacityRequired)
{
const query = {lock: lockScript, type: "empty"};
const cellCollector = new CellCollector(indexer, query);
let inputCells = [];
let inputCapacity = 0n;
for await (const cell of cellCollector.collect())
{
inputCells.push(cell);
inputCapacity += hexToInt(cell.cell_output.capacity);
if(inputCapacity >= capacityRequired)
break;
}
if(inputCapacity < capacityRequired)
throw new Error("Unable to collect enough cells to fulfill the capacity requirements.");
return {inputCells, inputCapacity};
}
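// --- Usage sketch (added for illustration; not part of the original library). ---
// Shows how the cells returned by collectCapacity() are typically fed into a Lumos
// TransactionSkeleton. It assumes TransactionSkeleton, addressToScript, ckbytesToShannons
// and intToHex are available in scope; the address, fee, and the 61 CKByte change-cell
// minimum are placeholder assumptions, and outputCapacity is the total capacity of the
// outputs already on the skeleton, as a BigInt in Shannons.
async function collectCapacityUsageSketch(indexer, transaction, outputCapacity)
{
	const address = "ckt1qyqvsv5240xeh85wvnau2eky8pwrhh4jr8ts8vyj37"; // placeholder address
	const txFee = 100_000n; // assumed fee, in Shannons

	// Collect enough capacity to cover the outputs, a 61 CKByte change cell, and the fee.
	const capacityRequired = outputCapacity + ckbytesToShannons(61n) + txFee;
	const {inputCells, inputCapacity} = await collectCapacity(indexer, addressToScript(address), capacityRequired);
	transaction = transaction.update("inputs", (i)=>i.concat(inputCells));

	// Return whatever is left over, minus the fee, as a change cell back to the same lock.
	const changeCapacity = inputCapacity - outputCapacity - txFee;
	const change = {cell_output: {capacity: intToHex(changeCapacity), lock: addressToScript(address), type: null}, data: "0x"};
	transaction = transaction.update("outputs", (o)=>o.push(change));

	return transaction;
}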
/**
 * Collects Cells for use as capacity that match both the specified lock script and type script.
*
 * This will search for Cells with at least capacityRequired. If there is insufficient capacity available, an error will be thrown.
*
* @example
 * const {inputCells, inputCapacity} = await collectCapacityWithType(indexer, addressToScript("ckt1qyqvsv5240xeh85wvnau2eky8pwrhh4jr8ts8vyj37"), typeScript, ckbytesToShannons(100n));
*
* @param {Object} indexer An instance of a running Lumos Indexer.
* @param {Object} lockScript A lock script used to query the CellCollector to find Cells to use as capacity.
* @param {Object} typeScript A type script used to query the CellCollector to find Cells to use as capacity.
* @param {BigInt} capacityRequired The number of CKBytes needed.
*
* @returns {Object} An object with the inputCells[] found and the inputCapacity contained within the provided Cells.
*/
async function collectCapacityWithType(indexer, lockScript, typeScript, capacityRequired)
{
const query = {lock: lockScript, type: typeScript};
const cellCollector = new CellCollector(indexer, query);
let inputCells = [];
let inputCapacity = 0n;
for await (const cell of cellCollector.collect())
{
inputCells.push(cell);
inputCapacity += hexToInt(cell.cell_output.capacity);
if(inputCapacity >= capacityRequired)
break;
}
if(inputCapacity < capacityRequired)
throw new Error("Unable to collect enough cells to fulfill the capacity requirements.");
return {inputCells, inputCapacity};
}
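// --- Usage sketch (added for illustration; not part of the original library). ---
// Builds a type script and uses collectCapacityWithType() to gather only the cells
// that carry it (for example SUDT token cells). The code hash and args below are
// placeholders, not real on-chain values; substitute the values for the target network.
async function collectTypedCellsUsageSketch(indexer, address)
{
	// A type script is matched exactly: code_hash + hash_type + args.
	const typeScript =
	{
		code_hash: "0x0000000000000000000000000000000000000000000000000000000000000000", // placeholder code hash
		hash_type: "type",
		args: "0x0000000000000000000000000000000000000000000000000000000000000000" // placeholder args (e.g. an SUDT owner lock hash)
	};

	// Gather at least 142 CKBytes of capacity from cells matching both the lock and type scripts.
	const {inputCells, inputCapacity} = await collectCapacityWithType(indexer, addressToScript(address), typeScript, ckbytesToShannons(142n));

	return {inputCells, inputCapacity};
}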
function describeTransaction(transaction, options)
{
const defaults =
{
showCellDeps: true,
showInputs: true,
showInputCapacity: true,
showInputData: false,
showInputLock: true,
showInputType: true,
showInputOutPoint: true,
showOutputs: true,
showOutputCapacity: true,
showOutputData: false,
showOutputLock: true,
showOutputType: true,
showWitnesses: true,
showTxFee: true
};
options = {...defaults, ...options};
let obj =
{
deps: [],
inputs: [],
outputs: [],
witnesses: []
};
for(const dep of transaction.cellDeps)
{
let cell =
{
dep_type: dep.dep_type,
out_point: dep.out_point.tx_hash + "-" + dep.out_point.index
};
obj.deps.push(cell);
| {
capacity: formattedNumber(hexToInt(input.cell_output.capacity)) + " Shannons",
capacityCkbytes: formattedNumber((Number(hexToInt(input.cell_output.capacity)) / 100_000_000), 4) + " CKBytes",
lock: new ScriptValue(input.cell_output.lock).hash(),
type: (!!input.cell_output.type) ? new ScriptValue(input.cell_output.type).hash() : null,
out_point: input.out_point.tx_hash + "-" + input.out_point.index,
data: input.data
};
obj.inputs.push(cell);
}
for(const output of transaction.outputs)
{
let cell =
{
capacity: formattedNumber(hexToInt(output.cell_output.capacity)) + " Shannons",
capacityCkbytes: formattedNumber((Number(hexToInt(output.cell_output.capacity)) / 100_000_000), 4) + " CKBytes",
lock: new ScriptValue(output.cell_output.lock).hash(),
type: (!!output.cell_output.type) ? new ScriptValue(output.cell_output.type).hash() : null,
data: output.data
};
obj.outputs.push(cell);
}
obj.witnesses = transaction.witnesses;
if(options.showCellDeps)
{
console.log("Cell Deps:");
for(const dep of obj.deps)
{
console.log(" - dep_type: " + dep.dep_type);
console.log(" out_point: " + dep.out_point);
}
}
if(options.showInputs)
{
console.log("Inputs:");
for(const input of obj.inputs)
{
if(options.showInputCapacity)
console.log(" - capacity: " + input.capacity + ` (${input.capacityCkbytes})`);
if(options.showInputLock)
console.log(" lock: " + input.lock);
if(options.showInputType)
console.log(" type: " + input.type);
if(options.showInputOutPoint)
console.log(" out_point: " + input.out_point);
if(options.showInputData)
{
const data = (input.data.length > 66) ? input.data.substr(0, 33) + "..." + input.data.substr(input.data.length - 30) : input.data;
const dataBytes = (data.length > 2) ? (input.data.length-2)/2 : 0;
console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`);
}
}
}
if(options.showOutputs)
{
console.log("Outputs:");
for(const output of obj.outputs)
{
if(options.showOutputCapacity)
console.log(" - capacity: " + output.capacity + ` (${output.capacityCkbytes})`);
if(options.showOutputLock)
console.log(" lock: " + output.lock);
if(options.showOutputType)
console.log(" type: " + output.type);
if(options.showOutputData)
{
const data = (output.data.length > 66) ? output.data.substr(0, 33) + "..." + output.data.substr(output.data.length - 30) : output.data;
const dataBytes = (data.length > 2) ? (output.data.length-2)/2 : 0;
console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`);
}
}
}
if(options.showWitnesses)
{
console.log("Witnesses:");
for(const witness of obj.witnesses)
{
console.log(" - " + witness);
}
}
if(options.showTxFee)
{
const inputCapacity = transaction.inputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n);
const outputCapacity = transaction.outputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n);
console.log(`TX Fee: ${formattedNumber(inputCapacity - outputCapacity)} Shannons`)
}
console.log();
}
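// --- Usage sketch (added for illustration; not part of the original library). ---
// describeTransaction() reads plain cellDeps/inputs/outputs/witnesses fields, so it is
// assumed here that the caller passes a plain object (e.g. a TransactionSkeleton
// converted with .toJS()); that expectation is inferred from the fields accessed above.
// Any option omitted from the options object keeps its default value.
function describeTransactionUsageSketch(transactionSkeleton)
{
	const options =
	{
		showWitnesses: false,	// hide the (often long) witness hex strings
		showInputData: true,	// show a truncated preview of each input cell's data
		showOutputData: true	// show a truncated preview of each output cell's data
	};

	describeTransaction(transactionSkeleton.toJS(), options);
}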
async function getLiveCell(nodeUrl, outPoint, returnData = false)
{
const rpc = new RPC(nodeUrl);
const res = await rpc.get_live_cell({tx_hash: outPoint.tx_hash, index: outPoint.index}, returnData);
if(res.status === "dead")
throw new Error(`Dead cell found at out point: ${outPoint.tx_hash}-${outPoint.index}`);
if(res.status !== "live")
throw new Error(`Live cell not found at out point: ${outPoint.tx_hash}-${outPoint.index}`);
const cell =
{
cell_output:
{
capacity: res.cell.output.capacity,
lock: {code_hash: res.cell.output.lock.code_hash, hash_type: res.cell.output.lock.hash | }
for(const input of transaction.inputs)
{
let cell =
| random_line_split |
index.js | number of CKBytes needed.
*
* @returns {Object} An object with the inputCells[] found and the inputCapacity contained within the provided Cells.
*/
async function collectCapacityWithType(indexer, lockScript, typeScript, capacityRequired)
{
const query = {lock: lockScript, type: typeScript};
const cellCollector = new CellCollector(indexer, query);
let inputCells = [];
let inputCapacity = 0n;
for await (const cell of cellCollector.collect())
{
inputCells.push(cell);
inputCapacity += hexToInt(cell.cell_output.capacity);
if(inputCapacity >= capacityRequired)
break;
}
if(inputCapacity < capacityRequired)
throw new Error("Unable to collect enough cells to fulfill the capacity requirements.");
return {inputCells, inputCapacity};
}
function describeTransaction(transaction, options)
{
const defaults =
{
showCellDeps: true,
showInputs: true,
showInputCapacity: true,
showInputData: false,
showInputLock: true,
showInputType: true,
showInputOutPoint: true,
showOutputs: true,
showOutputCapacity: true,
showOutputData: false,
showOutputLock: true,
showOutputType: true,
showWitnesses: true,
showTxFee: true
};
options = {...defaults, ...options};
let obj =
{
deps: [],
inputs: [],
outputs: [],
witnesses: []
};
for(const dep of transaction.cellDeps)
{
let cell =
{
dep_type: dep.dep_type,
out_point: dep.out_point.tx_hash + "-" + dep.out_point.index
};
obj.deps.push(cell);
}
for(const input of transaction.inputs)
{
let cell =
{
capacity: formattedNumber(hexToInt(input.cell_output.capacity)) + " Shannons",
capacityCkbytes: formattedNumber((Number(hexToInt(input.cell_output.capacity)) / 100_000_000), 4) + " CKBytes",
lock: new ScriptValue(input.cell_output.lock).hash(),
type: (!!input.cell_output.type) ? new ScriptValue(input.cell_output.type).hash() : null,
out_point: input.out_point.tx_hash + "-" + input.out_point.index,
data: input.data
};
obj.inputs.push(cell);
}
for(const output of transaction.outputs)
{
let cell =
{
capacity: formattedNumber(hexToInt(output.cell_output.capacity)) + " Shannons",
capacityCkbytes: formattedNumber((Number(hexToInt(output.cell_output.capacity)) / 100_000_000), 4) + " CKBytes",
lock: new ScriptValue(output.cell_output.lock).hash(),
type: (!!output.cell_output.type) ? new ScriptValue(output.cell_output.type).hash() : null,
data: output.data
};
obj.outputs.push(cell);
}
obj.witnesses = transaction.witnesses;
if(options.showCellDeps)
{
console.log("Cell Deps:");
for(const dep of obj.deps)
{
console.log(" - dep_type: " + dep.dep_type);
console.log(" out_point: " + dep.out_point);
}
}
if(options.showInputs)
{
console.log("Inputs:");
for(const input of obj.inputs)
{
if(options.showInputCapacity)
console.log(" - capacity: " + input.capacity + ` (${input.capacityCkbytes})`);
if(options.showInputLock)
console.log(" lock: " + input.lock);
if(options.showInputType)
console.log(" type: " + input.type);
if(options.showInputOutPoint)
console.log(" out_point: " + input.out_point);
if(options.showInputData)
{
const data = (input.data.length > 66) ? input.data.substr(0, 33) + "..." + input.data.substr(input.data.length - 30) : input.data;
const dataBytes = (data.length > 2) ? (input.data.length-2)/2 : 0;
console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`);
}
}
}
if(options.showOutputs)
{
console.log("Outputs:");
for(const output of obj.outputs)
{
if(options.showOutputCapacity)
console.log(" - capacity: " + output.capacity + ` (${output.capacityCkbytes})`);
if(options.showOutputLock)
console.log(" lock: " + output.lock);
if(options.showOutputType)
console.log(" type: " + output.type);
if(options.showOutputData)
{
const data = (output.data.length > 66) ? output.data.substr(0, 33) + "..." + output.data.substr(output.data.length - 30) : output.data;
const dataBytes = (data.length > 2) ? (output.data.length-2)/2 : 0;
console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`);
}
}
}
if(options.showWitnesses)
{
console.log("Witnesses:");
for(const witness of obj.witnesses)
{
console.log(" - " + witness);
}
}
if(options.showTxFee)
{
const inputCapacity = transaction.inputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n);
const outputCapacity = transaction.outputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n);
console.log(`TX Fee: ${formattedNumber(inputCapacity - outputCapacity)} Shannons`);
}
console.log();
}
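// Hypothetical usage sketch, not part of the original file: prints a summary of
// a transaction skeleton while hiding the witnesses and cell data. The option
// names used here come from the defaults defined in describeTransaction().
function exampleDescribeTransaction(transaction)
{
	describeTransaction(transaction, {showWitnesses: false, showInputData: false, showOutputData: false});
}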
async function getLiveCell(nodeUrl, outPoint, returnData = false)
{
const rpc = new RPC(nodeUrl);
const res = await rpc.get_live_cell({tx_hash: outPoint.tx_hash, index: outPoint.index}, returnData);
if(res.status === "dead")
throw new Error(`Dead cell found at out point: ${outPoint.tx_hash}-${outPoint.index}`);
if(res.status !== "live")
throw new Error(`Live cell not found at out point: ${outPoint.tx_hash}-${outPoint.index}`);
const cell =
{
cell_output:
{
capacity: res.cell.output.capacity,
lock: {code_hash: res.cell.output.lock.code_hash, hash_type: res.cell.output.lock.hash_type, args: res.cell.output.lock.args},
type: (!res.cell.output.type) ? undefined : {code_hash: res.cell.output.type.code_hash, hash_type: res.cell.output.type.hash_type, args: res.cell.output.type.args}
},
out_point:
{
tx_hash: outPoint.tx_hash,
index: outPoint.index
},
data: (returnData) ? res.cell.data.content : "0x"
};
return cell;
}
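// Hypothetical usage sketch, not part of the original file: the node URL and
// out point below are placeholders for illustration only.
async function exampleGetLiveCell()
{
	const nodeUrl = "http://127.0.0.1:8114";
	const outPoint = {tx_hash: "0x" + "0".repeat(64), index: "0x0"};
	const cell = await getLiveCell(nodeUrl, outPoint, true);
	console.log(`Live cell holds ${hexToInt(cell.cell_output.capacity)} Shannons.`);
	return cell;
}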
async function indexerReady(indexer, updateProgress=((_indexerTip, _rpcTip)=>{}), options)
{
const defaults = {blockDifference: 0, timeoutMs: 300_000, recheckMs: 500};
options = {...defaults, ...options};
return new Promise(async (resolve, reject) =>
{
let timedOut = false;
const timeoutTimer = (options.timeoutMs !== 0) ? setTimeout(()=>{timedOut = true;}, options.timeoutMs) : false;
const rpc = new RPC(indexer.uri);
let indexerFailureCount = 0;
let rpcFailureCount = 0;
while(true)
{
if(timedOut)
return reject(Error("Transaction timeout."));
const indexerTipObj = await indexer.tip();
if(!indexerTipObj)
{
if(++indexerFailureCount >= 5)
return reject(Error("Indexer gave an unexpected response."));
await new Promise((resolve)=>setTimeout(resolve, 200));
continue;
}
const rpcResponse = await rpc.get_tip_block_number();
if(!rpcResponse)
{
if(++rpcFailureCount >= 5)
return reject(Error("RPC gave an unexpected response."));
await new Promise((resolve)=>setTimeout(resolve, 200));
continue;
}
const indexerTip = BigInt(indexerTipObj.block_number);
const rpcTip = BigInt(rpcResponse);
if(indexerTip >= (rpcTip - BigInt(options.blockDifference)))
{
if(timeoutTimer)
clearTimeout(timeoutTimer);
break;
}
updateProgress(indexerTip, rpcTip);
await new Promise(resolve=>setTimeout(resolve, options.recheckMs));
}
return resolve();
});
}
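// Hypothetical usage sketch, not part of the original file: waits until the
// indexer is within 5 blocks of the node tip, rechecking every 2 seconds. The
// option names come from the defaults defined in indexerReady().
async function exampleIndexerReady(indexer)
{
	await indexerReady(indexer, (indexerTip, rpcTip)=>console.log(`${indexerTip}/${rpcTip}`), {blockDifference: 5, recheckMs: 2_000});
}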
async function initializeLumosIndexer(nodeUrl)
{
// Start the Lumos Indexer and wait until it is fully synchronized.
const indexer = new Indexer(nodeUrl, "../indexer-data");
indexer.startForever();
console.log("Indexer is syncing. Please wait.");
await indexerReady(indexer, (indexerTip, rpcTip)=>console.log(`Syncing ${Math.floor(Number(indexerTip)/Number(rpcTip)*10_000)/100}% completed.`), {timeoutMs: 0, recheckMs: 800});
console.log();
return indexer;
}
async function | readFile | identifier_name |
|
index.js | OutputCapacity: true,
showOutputData: false,
showOutputLock: true,
showOutputType: true,
showWitnesses: true,
showTxFee: true
};
options = {...defaults, ...options};
let obj =
{
deps: [],
inputs: [],
outputs: [],
witnesses: []
};
for(const dep of transaction.cellDeps)
{
let cell =
{
dep_type: dep.dep_type,
out_point: dep.out_point.tx_hash + "-" + dep.out_point.index
};
obj.deps.push(cell);
}
for(const input of transaction.inputs)
{
let cell =
{
capacity: formattedNumber(hexToInt(input.cell_output.capacity)) + " Shannons",
capacityCkbytes: formattedNumber((Number(hexToInt(input.cell_output.capacity)) / 100_000_000), 4) + " CKBytes",
lock: new ScriptValue(input.cell_output.lock).hash(),
type: (!!input.cell_output.type) ? new ScriptValue(input.cell_output.type).hash() : null,
out_point: input.out_point.tx_hash + "-" + input.out_point.index,
data: input.data
};
obj.inputs.push(cell);
}
for(const output of transaction.outputs)
{
let cell =
{
capacity: formattedNumber(hexToInt(output.cell_output.capacity)) + " Shannons",
capacityCkbytes: formattedNumber((Number(hexToInt(output.cell_output.capacity)) / 100_000_000), 4) + " CKBytes",
lock: new ScriptValue(output.cell_output.lock).hash(),
type: (!!output.cell_output.type) ? new ScriptValue(output.cell_output.type).hash() : null,
data: output.data
};
obj.outputs.push(cell);
}
obj.witnesses = transaction.witnesses;
if(options.showCellDeps)
{
console.log("Cell Deps:");
for(const dep of obj.deps)
{
console.log(" - dep_type: " + dep.dep_type);
console.log(" out_point: " + dep.out_point);
}
}
if(options.showInputs)
{
console.log("Inputs:");
for(const input of obj.inputs)
{
if(options.showInputCapacity)
console.log(" - capacity: " + input.capacity + ` (${input.capacityCkbytes})`);
if(options.showInputLock)
console.log(" lock: " + input.lock);
if(options.showInputType)
console.log(" type: " + input.type);
if(options.showInputOutPoint)
console.log(" out_point: " + input.out_point);
if(options.showInputData)
{
const data = (input.data.length > 66) ? input.data.substr(0, 33) + "..." + input.data.substr(input.data.length - 30) : input.data;
const dataBytes = (data.length > 2) ? (input.data.length-2)/2 : 0;
console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`);
}
}
}
if(options.showOutputs)
{
console.log("Outputs:");
for(const output of obj.outputs)
{
if(options.showOutputCapacity)
console.log(" - capacity: " + output.capacity + ` (${output.capacityCkbytes})`);
if(options.showOutputLock)
console.log(" lock: " + output.lock);
if(options.showOutputType)
console.log(" type: " + output.type);
if(options.showOutputData)
{
const data = (output.data.length > 66) ? output.data.substr(0, 33) + "..." + output.data.substr(output.data.length - 30) : output.data;
const dataBytes = (data.length > 2) ? (output.data.length-2)/2 : 0;
console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`);
}
}
}
if(options.showWitnesses)
{
console.log("Witnesses:");
for(const witness of obj.witnesses)
{
console.log(" - " + witness);
}
}
if(options.showTxFee)
{
const inputCapacity = transaction.inputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n);
const outputCapacity = transaction.outputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n);
console.log(`TX Fee: ${formattedNumber(inputCapacity - outputCapacity)} Shannons`);
}
console.log();
}
async function getLiveCell(nodeUrl, outPoint, returnData = false)
{
const rpc = new RPC(nodeUrl);
const res = await rpc.get_live_cell({tx_hash: outPoint.tx_hash, index: outPoint.index}, returnData);
if(res.status === "dead")
throw new Error(`Dead cell found at out point: ${outPoint.tx_hash}-${outPoint.index}`);
if(res.status !== "live")
throw new Error(`Live cell not found at out point: ${outPoint.tx_hash}-${outPoint.index}`);
const cell =
{
cell_output:
{
capacity: res.cell.output.capacity,
lock: {code_hash: res.cell.output.lock.code_hash, hash_type: res.cell.output.lock.hash_type, args: res.cell.output.lock.args},
type: (!res.cell.output.type) ? undefined : {code_hash: res.cell.output.type.code_hash, hash_type: res.cell.output.type.hash_type, args: res.cell.output.type.args}
},
out_point:
{
tx_hash: outPoint.tx_hash,
index: outPoint.index
},
data: (returnData) ? res.cell.data.content : "0x"
};
return cell;
}
async function indexerReady(indexer, updateProgress=((_indexerTip, _rpcTip)=>{}), options)
{
const defaults = {blockDifference: 0, timeoutMs: 300_000, recheckMs: 500};
options = {...defaults, ...options};
return new Promise(async (resolve, reject) =>
{
let timedOut = false;
const timeoutTimer = (options.timeoutMs !== 0) ? setTimeout(()=>{timedOut = true;}, options.timeoutMs) : false;
const rpc = new RPC(indexer.uri);
let indexerFailureCount = 0;
let rpcFailureCount = 0;
while(true)
{
if(timedOut)
return reject(Error("Transaction timeout."));
const indexerTipObj = await indexer.tip();
if(!indexerTipObj)
{
if(++indexerFailureCount >= 5)
return reject(Error("Indexer gave an unexpected response."));
await new Promise((resolve)=>setTimeout(resolve, 200));
continue;
}
const rpcResponse = await rpc.get_tip_block_number();
if(!rpcResponse)
{
if(++rpcFailureCount >= 5)
return reject(Error("RPC gave an unexpected response."));
await new Promise((resolve)=>setTimeout(resolve, 200));
continue;
}
const indexerTip = BigInt(indexerTipObj.block_number);
const rpcTip = BigInt(rpcResponse);
if(indexerTip >= (rpcTip - BigInt(options.blockDifference)))
{
if(timeoutTimer)
clearTimeout(timeoutTimer);
break;
}
updateProgress(indexerTip, rpcTip);
await new Promise(resolve=>setTimeout(resolve, options.recheckMs));
}
return resolve();
});
}
async function initializeLumosIndexer(nodeUrl)
{
// Start the Lumos Indexer and wait until it is fully synchronized.
const indexer = new Indexer(nodeUrl, "../indexer-data");
indexer.startForever();
console.log("Indexer is syncing. Please wait.");
await indexerReady(indexer, (indexerTip, rpcTip)=>console.log(`Syncing ${Math.floor(Number(indexerTip)/Number(rpcTip)*10_000)/100}% completed.`), {timeoutMs: 0, recheckMs: 800});
console.log();
return indexer;
}
async function readFile(filename)
{
const readFile = util.promisify(fs.readFile);
return await readFile(filename);
}
function readFileSync(filename)
{
return fs.readFileSync(filename);
}
async function readFileToHexString(filename)
{
const data = await readFile(filename);
const dataSize = data.length;
const hexString = "0x" + data.toString("hex");
return {hexString, dataSize};
}
function readFileToHexStringSync(filename)
{
const data = readFileSync(filename);
const dataSize = data.length;
const hexString = "0x" + data.toString("hex");
return {hexString, dataSize};
}
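// Hypothetical usage sketch, not part of the original file: the file name is a
// placeholder. Loading a binary as a hex string is how cell data is typically
// prepared before being placed on-chain.
async function exampleReadFileToHexString()
{
	const {hexString, dataSize} = await readFileToHexString("./files/example.bin");
	console.log(`Read ${dataSize} bytes: ${hexString.substr(0, 10)}...`);
	return hexString;
}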
async function sendTransaction(nodeUrl, signedTx)
| {
const rpc = new RPC(nodeUrl);
let result;
try
{
result = await rpc.send_transaction(signedTx);
}
catch(error)
{
const regex = /^(\w+): ([\w\s]+) (\{.*\})$/;
const matches = error.message.match(regex);
if(!!matches && matches.length > 0)
{
const category = matches[1];
const type = matches[2];
const json = JSON.parse(matches[3]);
console.log();
| identifier_body |
|
utils.py | """
"""
# If we have a filter, apply it.
if filter_tree is not None:
try:
objs_to_print = filter_objects_from_simple_keypaths(objs, filter_tree.split(','))
except Exception as e:
log.error(e.args[0])
exit(1)
else:
objs_to_print = objs
# Set up a default depth
if depth is None:
depth = 10
# Next, print the tree to the appropriate depth
print_result_as_tree(objs_to_print, depth)
"""
def print_result_as_json(objs, pickle=False):
#print(jsonpickle.encode(objs))
nestedDict = serializer.loads(jsonpickle.encode(objs))
filteredDict = type(nestedDict)()
if(pickle==False):
remove_pickling(nestedDict, filteredDict)
else:
filteredDict = nestedDict
print(serializer.dumps(filteredDict,indent=4))
def remove_pickling(nestedDict, filteredDict):
if type(nestedDict) is dict:
#foreach key, if list, recurse, if dict, recurse, if string recurse unless py/obj is key.
for key in nestedDict:
if key == "py/object":
continue
else:
filteredDict[key] = type(nestedDict[key])()
filteredDict[key] = remove_pickling(nestedDict[key], filteredDict[key])
return filteredDict
if type(nestedDict) is list:
# foreach item
for i in range(len(nestedDict)):
filteredDict.append(type(nestedDict[i])())
filteredDict[i] = remove_pickling(nestedDict[i], filteredDict[i])
return filteredDict
return nestedDict
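# Hypothetical illustration, not part of the original module: shows how
# remove_pickling() strips jsonpickle's "py/object" markers. The class names in
# the sample data are made up for the example.
def _example_remove_pickling():
    pickled = {"py/object": "solidfire.models.Account",
               "username": "admin",
               "volumes": [{"py/object": "solidfire.models.Volume", "volumeID": 1}]}
    cleaned = remove_pickling(pickled, type(pickled)())
    # cleaned == {"username": "admin", "volumes": [{"volumeID": 1}]}
    return cleaned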
"""
SDK1.6 Note:
Commenting this out because print_tree is not supported in SDK 1.6.
"""
def get_result_as_tree(objs, depth=1, currentDepth=0, lastKey = ""):
print("print_tree is not supported in SDK1.6")
"""stringToReturn = ""
if(currentDepth > depth):
return "<to see more details, increase depth>\n"
if(type(objs) is str or type(objs) is bool or type(objs) is int or type(objs) is type(u'') or objs is None or type(objs) is float):# or (sys.version_info[0]<3 and type(objs) is long)):
return str(objs) + "\n"
if(type(objs) is list):
stringToReturn += "\n"
for i in range(len(objs)):
obj = objs[i]
stringToReturn += currentDepth*" "+get_result_as_tree(obj, depth, currentDepth+1, lastKey)
return stringToReturn
if(isinstance(objs, dict)):
stringToReturn += "\n"
for key in objs:
stringToReturn += currentDepth*" "+key+": "+get_result_as_tree(objs[key], depth, currentDepth+1, key)
return stringToReturn
if (isinstance(objs, tuple)):
return str(objs[0]) + "\n"
if(objs is None):
return stringToReturn
mydict = objs.__dict__
stringToReturn += "\n"
for key in mydict:
stringToReturn += currentDepth*" "
stringToReturn += key+": "+get_result_as_tree(mydict[key], depth, currentDepth+1, key)
return stringToReturn
"""
def filter_objects_from_simple_keypaths(objs, simpleKeyPaths):
# First, we assemble the key paths.
# They start out like this:
# [accounts.username, accounts.initiator_secret.secret, accounts.status]
# and become like this:
# {"accounts":{"username":True, "initiator_secret":{"secret":True}, "status":True}}
keyPaths = dict()
for simpleKeyPath in simpleKeyPaths:
currentLevel = keyPaths
keyPathArray = simpleKeyPath.split('.')
for i in range(len(keyPathArray)):
if(i<(len(keyPathArray) - 1)):
if currentLevel.get(keyPathArray[i]) is None:
currentLevel[keyPathArray[i]] = dict()
else:
currentLevel[keyPathArray[i]] = True
currentLevel = currentLevel[keyPathArray[i]]
# Then we pass it in to filter objects.
return filter_objects(objs, keyPaths)
# keyPaths is arranged as follows:
# it is a nested dict that mirrors the nesting of the key paths, with True marking the leaf keys to keep.
def filter_objects(objs, keyPaths):
# Otherwise, we keep recursing deeper.
# Because there are deeper keys, we know that we can go deeper.
# This means we are dealing with either an array or a dict.
# If keyPaths looks like this:
# {"username": True, "volumes": {"Id": True}}
# The keys in this sequence will be username and volumes.
# When we recurse into volumes, the keys will be Id.
finalFilteredObjects = dict()
if keyPaths == True and type(objs) is not list:
return objs
# If we've found a list, we recurse deeper to pull out the objs.
# We do not advance our keyPath recursion because this is just a list.
if type(objs) is list:
# If we have a list of objects, we will need to assemble and return a list of stuff.
filteredObjsDict = [None]*len(objs)
for i in range(len(objs)):
# Each element could be a string, dict, or list.
filteredObjsDict[i] = filter_objects(objs[i], keyPaths)
return filteredObjsDict
dictionaryOfInterest = None
if type(objs) is dict:
dictionaryOfInterest = objs
else:
dictionaryOfInterest = objs.__dict__
for key in keyPaths:
# If we've found a dict, we recurse deeper to pull out the objs.
# Because this is a dict, we must advance our keyPaths recursion.
# For example, with keyPaths {"volumes": {"Id": True}}, we look up "volumes" here and recurse with {"Id": True}.
if key not in dictionaryOfInterest:
raise ValueError("'"+key+"' is not a valid key for this level. Valid keys are: "+','.join(dictionaryOfInterest.keys()))
finalFilteredObjects[key] = filter_objects(dictionaryOfInterest[key], keyPaths[key])
return finalFilteredObjects
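# Hypothetical illustration, not part of the original module: filters a list of
# plain dicts down to selected key paths. The account data below is made up.
def _example_filter_objects():
    accounts = [{"username": "admin", "status": "active", "initiator_secret": {"secret": "s3cret"}},
                {"username": "guest", "status": "locked", "initiator_secret": {"secret": "other"}}]
    filtered = filter_objects_from_simple_keypaths(accounts, ["username", "initiator_secret.secret"])
    # filtered == [{"username": "admin", "initiator_secret": {"secret": "s3cret"}},
    #              {"username": "guest", "initiator_secret": {"secret": "other"}}]
    return filtered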
def print_result_as_table(objs, keyPaths):
filteredDictionary = filter_objects(objs, keyPaths)
def print_result_as_tree(objs, depth=1):
print(get_result_as_tree(objs, depth))
def establish_connection(ctx):
# Verify that the mvip does not contain the port number:
if ctx.mvip and ":" in ctx.mvip:
ctx.logger.error('Please provide the port using the port parameter.')
exit(1)
cfg = None
# Arguments take precedence regardless of env settings
if ctx.mvip:
if ctx.username is None:
ctx.username = getpass.getpass("Username:")
if ctx.password is None:
ctx.password = getpass.getpass("Password:")
cfg = {'mvip': ctx.mvip,
'username': "b'"+encrypt(ctx.username).decode('utf-8')+"'",
'password': "b'"+encrypt(ctx.password).decode('utf-8')+"'",
'port': ctx.port,
'url': 'https://%s:%s' % (ctx.mvip, ctx.port),
'version': ctx.version,
'verifyssl': ctx.verifyssl,
'timeout': ctx.timeout}
try:
ctx.element = ElementFactory.create(cfg["mvip"],decrypt(cfg["username"]),decrypt(cfg["password"]),port=cfg["port"],version=cfg["version"],verify_ssl=cfg["verifyssl"],timeout=cfg["timeout"])
ctx.version = ctx.element._api_version
cfg["version"] = ctx.element._api_version
except Exception as e:
ctx.logger.error(e.__str__())
exit(1)
# If someone accidentally passed in an argument, but didn't specify everything, throw an error.
elif ctx.username or ctx.password:
ctx.logger.error("In order to manually connect, please provide an mvip, a username, AND a password")
# If someone asked for a given connection or we need to default to using the connection at index 0 if it exists:
else:
if ctx.connectionindex is None and ctx.name is None:
cfg = get_default_connection(ctx)
elif ctx.connectionindex is not None:
connections = get_connections(ctx)
if int(ctx.connectionindex) > (len(connections)-1) or | if as_json and (depth is not None or filter_tree is not None):
log.error("If you choose to print it as json, do not provide a depth or filter. Those are for printing it as a tree.")
exit()
"""
SDK1.6 Note:
Since print_tree is not supported in 1.6, when both available output formats (json and pickle) are set to False, change the default output format (pickle) to True.
"""
if as_json == False and as_pickle == False:
as_pickle = True
# If json is true, we print it as json and return:
if as_json == True or as_pickle == True:
print_result_as_json(objs, as_pickle)
return
"""
SDK1.6 Note:
Commenting out these lines as print_tree is not supported in 1.6. | identifier_body |
|
utils.py | in keyPaths:
# If we've found a dict, we recurse deeper to pull out the objs.
# Because this is a dict, we must advance our keyPaths recursion.
# For example, with keyPaths {"volumes": {"Id": True}}, we look up "volumes" here and recurse with {"Id": True}.
if key not in dictionaryOfInterest:
raise ValueError("'"+key+"' is not a valid key for this level. Valid keys are: "+','.join(dictionaryOfInterest.keys()))
finalFilteredObjects[key] = filter_objects(dictionaryOfInterest[key], keyPaths[key])
return finalFilteredObjects
def print_result_as_table(objs, keyPaths):
filteredDictionary = filter_objects(objs, keyPaths)
def print_result_as_tree(objs, depth=1):
print(get_result_as_tree(objs, depth))
def establish_connection(ctx):
# Verify that the mvip does not contain the port number:
if ctx.mvip and ":" in ctx.mvip:
ctx.logger.error('Please provide the port using the port parameter.')
exit(1)
cfg = None
# Arguments take precedence regardless of env settings
if ctx.mvip:
if ctx.username is None:
ctx.username = getpass.getpass("Username:")
if ctx.password is None:
ctx.password = getpass.getpass("Password:")
cfg = {'mvip': ctx.mvip,
'username': "b'"+encrypt(ctx.username).decode('utf-8')+"'",
'password': "b'"+encrypt(ctx.password).decode('utf-8')+"'",
'port': ctx.port,
'url': 'https://%s:%s' % (ctx.mvip, ctx.port),
'version': ctx.version,
'verifyssl': ctx.verifyssl,
'timeout': ctx.timeout}
try:
ctx.element = ElementFactory.create(cfg["mvip"],decrypt(cfg["username"]),decrypt(cfg["password"]),port=cfg["port"],version=cfg["version"],verify_ssl=cfg["verifyssl"],timeout=cfg["timeout"])
ctx.version = ctx.element._api_version
cfg["version"] = ctx.element._api_version
except Exception as e:
ctx.logger.error(e.__str__())
exit(1)
# If someone accidentally passed in an argument, but didn't specify everything, throw an error.
elif ctx.username or ctx.password:
ctx.logger.error("In order to manually connect, please provide an mvip, a username, AND a password")
# If someone asked for a given connection or we need to default to using the connection at index 0 if it exists:
else:
if ctx.connectionindex is None and ctx.name is None:
cfg = get_default_connection(ctx)
elif ctx.connectionindex is not None:
connections = get_connections(ctx)
if int(ctx.connectionindex) > (len(connections)-1) or int(ctx.connectionindex) < (-len(connections)):
ctx.logger.error("Connection "+str(ctx.connectionindex)+" Please provide an index between "+str(-len(connections))+" and "+str(len(connections)-1))
exit(1)
cfg = connections[ctx.connectionindex]
elif ctx.name is not None:
connections = get_connections(ctx)
filteredCfg = [connection for connection in connections if connection["name"] == ctx.name]
if(len(filteredCfg) > 1):
ctx.logger.error("Your connections.csv file has become corrupted. There are two connections of the same name.")
exit()
if(len(filteredCfg) < 1):
ctx.logger.error("Could not find a connection named "+ctx.name)
exit()
cfg = filteredCfg[0]
# If we managed to find the connection we were looking for, we must try to establish the connection.
if cfg is not None:
# Finally, we need to establish our connection via elementfactory:
try:
if int(cfg["port"]) != 443:
address = cfg["mvip"] + ":" + cfg["port"]
else:
address = cfg["mvip"]
ctx.element = ElementFactory.create(address, decrypt(cfg["username"]), decrypt(cfg["password"]), cfg["version"], verify_ssl=cfg["verifyssl"])
if int(cfg["timeout"]) != 30:
ctx.element.timeout(cfg["timeout"])
except Exception as e:
ctx.logger.error(e.__str__())
ctx.logger.error("The connection is corrupt. Run 'sfcli connection prune' to try and remove all broken connections or use 'sfcli connection remove -n name'")
ctx.logger.error(cfg)
exit(1)
# If we want the json output directly from the source, we'll have to override the send request method in the sdk:
# This is so that we can circumvent the python objects and get exactly what the json-rpc returns.
if ctx.json and ctx.element:
def new_send_request(*args, **kwargs):
return ctx.element.__class__.__bases__[0].send_request(ctx.element, return_response_raw=True, *args, **kwargs)
ctx.element.send_request = new_send_request
# The only time it is none is when we're asking for help or we're trying to store a connection.
# If that's not what we're doing, we catch it later.
if cfg is not None:
cfg["port"] = int(cfg["port"])
ctx.cfg = cfg
cfg["name"] = cfg.get("name", "default")
if not ctx.nocache:
write_default_connection(ctx, cfg)
if ctx.element is None:
ctx.logger.error("You must establish at least one connection and specify which you intend to use.")
exit()
# this needs to be atomic.
def get_connections(ctx):
connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv")
connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock")
if os.path.exists(connectionsCsvLocation):
try:
with FileLock(connectionsLock):
with open(connectionsCsvLocation, 'r') as connectionFile:
connections = list(csv.DictReader(connectionFile, delimiter=','))
except Exception as e:
ctx.logger.error("Problem reading "+connectionsCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file.")
exit(1)
else:
connections = []
for connection in connections:
connection["version"] = float(connection["version"])
if connection.get("verifyssl") == "True":
connection["verifyssl"] = True
else:
connection["verifyssl"] = False
return connections
def write_connections(ctx, connections):
try:
connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv")
connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock")
with open(connectionsCsvLocation, 'w') as f:
with FileLock(connectionsLock):
w = csv.DictWriter(f, ["name","mvip","port","username","password","version","url","verifyssl","timeout"], lineterminator='\n')
w.writeheader()
for connection in connections:
if connection is not None:
w.writerow(connection)
except Exception as e:
ctx.logger.error("Problem writing "+ connectionsCsvLocation + " " + str(e.args)+" Try changing the permissions of that file.")
exit(1)
def get_default_connection(ctx):
connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv")
if os.path.exists(connectionCsvLocation):
defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock")
try:
with FileLock(defaultLockLocation):
with open(connectionCsvLocation) as connectionFile:
connection = list(csv.DictReader(connectionFile, delimiter=','))
except Exception as e:
ctx.logger.error("Problem reading "+connectionCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file or specifying credentials.")
exit(1)
if len(connection)>0:
connection[0]["version"] = float(connection[0]["version"])
if(connection[0]["verifyssl"] == "True"):
connection[0]["verifyssl"] = True
else:
connection[0]["verifyssl"] = False
return connection[0]
else:
os.remove(defaultLockLocation)
ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.")
exit(1)
else:
ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.")
exit(1)
def write_default_connection(ctx, connection):
connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv")
try:
defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock")
with FileLock(defaultLockLocation):
with open(connectionCsvLocation, 'w') as f:
w = csv.DictWriter(f, ["name", "mvip", "port", "username", "password", "version", "url", "verifyssl", "timeout"],
lineterminator='\n')
w.writeheader()
w.writerow(connection)
except Exception as e:
ctx.logger.warning("Problem writing "+ connectionCsvLocation + " " + str(e.args)+" Try using changing the permissions of that file or using the --nocache flag.")
# WARNING! This doesn't actually give us total security. It only gives us obscurity.
def encrypt(sensitive_data):
cipher = ARC4.new(socket.gethostname().encode('utf-8') + "SOLIDFIRE".encode('utf-8'))
encoded = base64.b64encode(cipher.encrypt(sensitive_data.encode('utf-8')))
return encoded
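# Hypothetical illustration, not part of the original module: encrypt() only
# obfuscates with a hostname-derived ARC4 key. This mirrors how
# establish_connection() stores credentials before writing them to disk.
def _example_stored_credential():
    stored = "b'" + encrypt("admin").decode('utf-8') + "'"
    return stored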
def | decrypt | identifier_name |